/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/gpu/fuse_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/array.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/make_op.hpp>
#include <cmath>
#include <set>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MIOPEN_FUSION)

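// RAII-style wrapper around a MIOpen fusion plan. Tensor and convolution
// descriptors created while building the plan are kept alive in `storage`
// until the plan itself is released.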
struct fusion
{
    using op_t = miopenFusionOpDescriptor_t;
    shared<fusion_plan_descriptor> fp;

    // Used as a temporary hack to keep descriptor references alive
    std::vector<std::shared_ptr<void>> storage;

    template <class T>
    auto keep_alive(T x)
    {
        auto result = share(std::move(x));
        storage.push_back(result);
        return result;
    }

    fusion() = default;

    fusion(const shape& input)
    {
        assert(input.standard());
        auto t = make_tensor(input);
        fp     = make_fusion_plan(t);
        assert(fp);
        keep_alive(std::move(t));
    }

    bool empty() const { return fp == nullptr; }

    op_t operator[](std::size_t i) const
    {
        assert(fp);
        op_t result;
        auto status = miopenFusionPlanGetOp(fp.get(), i, &result);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed retrieving operator at " + std::to_string(i));
        return result;
    }

    auto get() const
    {
        assert(fp);
        return fp.get();
    }

    op_t create_bias(const shape& bias)
    {
        assert(fp);
        op_t result;
        auto b      = shape{bias.type(), {1, bias.lens().at(1), 1, 1}};
        auto t      = keep_alive(make_tensor(b));
        auto status = miopenCreateOpBiasForward(fp.get(), &result, t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    op_t create_relu()
    {
        assert(fp);
        op_t result;
        auto status = miopenCreateOpActivationForward(fp.get(), &result, miopenActivationRELU);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    op_t create_conv(const op::convolution& op, const shape& weights)
    {
        assert(fp);
        op_t result;
        auto cd     = keep_alive(make_conv(op));
        auto t      = keep_alive(make_tensor(weights));
        auto status = miopenCreateOpConvForward(fp.get(), &result, cd.get(), t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    shape get_workspace(context&)
    {
        // assert(fp);
        // TODO: Use zero workspace for now
        std::size_t ws_size = 0;
        // int algo_count = 1;
        // miopenConvFwdAlgorithm_t algo;
        // miopenFusionPlanConvolutionGetAlgo(fp.get(), 1, &algo_count, &algo);
        // miopenFusionPlanGetWorkSpaceSize(ctx.get_stream().get_miopen(), fp.get(), &ws_size,
        // algo);
        return shape{shape::int8_type, {ws_size}};
    }

    bool compile(context& ctx)
    {
        assert(fp);
        return miopenCompileFusionPlan(ctx.get_stream().get_miopen(), fp.get()) ==
               miopenStatusSuccess;
    }

    argument execute(context& ctx,
                     const fused_operator_args& fargs,
                     const argument& x,
                     const argument& y) const
    {
        assert(fp);
        auto x_td   = make_tensor(x.get_shape());
        auto y_td   = make_tensor(y.get_shape());
        auto status = miopenExecuteFusionPlan(ctx.get_stream().get_miopen(),
                                              fp.get(),
                                              x_td.get(),
                                              x.implicit(),
                                              y_td.get(),
                                              y.implicit(),
                                              fargs.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed to execute fusion plan");
        return y;
    }
};

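// MIOpen fusion plans are only used on these architectures; fusable_conv
// rejects everything else.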
const std::unordered_set<std::string>& get_supported_archs()
{
    static std::unordered_set<std::string> supported_archs{"gfx900", "gfx906", "gfx908", "gfx1030"};
    return supported_archs;
}

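// Matches a 4-D broadcast shape whose only nonzero stride is on dimension 1
// (channels), i.e. a per-channel bias.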
MIGRAPHX_PRED_MATCHER(bias_shape, instruction_ref ins)
{
    auto&& s = ins->get_shape();
    return s.broadcasted() and s.strides().size() == 4 and s.strides()[0] == 0 and
           s.strides()[1] != 0 and s.strides()[2] == 0 and s.strides()[3] == 0;
}

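// A gpu::convolution is fusable only for float tensors with group == 1,
// symmetric spatial dims, and the small set of padding/stride/dilation
// configurations handled by MIOpen's fused kernels.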
MIGRAPHX_PRED_MATCHER(fusable_conv, instruction_ref ins)
{
    const auto device_name = trim(split_string(get_device_name(), ':').front());
    if(not contains(get_supported_archs(), device_name))
        return false;
    if(enabled(MIGRAPHX_DISABLE_MIOPEN_FUSION{}))
        return false;
    if(ins->name() != "gpu::convolution")
        return false;
    if(ins->get_shape().type() != shape::float_type)
        return false;
    auto wei = ins->inputs().at(1)->get_shape();
    assert(wei.lens().size() == 4);
    auto miopen_conv_op = ins->get_operator().to_value();
    auto algo           = miopen_conv_op.at("algo").to<miopenConvFwdAlgorithm_t>();
    auto conv_op        = from_value<op::convolution>(miopen_conv_op["op"]);
    if(conv_op.group > 1)
        return false;
    if(wei.lens()[1] > 512 and algo != miopenConvolutionFwdAlgoWinograd)
        return false;

    // Do not fuse non-symmetric input
    auto input_lens = ins->inputs().at(0)->get_shape().lens();
    if(input_lens[2] != input_lens[3] or wei.lens()[2] != wei.lens()[3])
        return false;

    // Don't fuse winograd for non-3x3 kernels since there is no fused winograd for those configs
    if(algo == miopenConvolutionFwdAlgoWinograd and wei.lens()[2] != 3 and wei.lens()[3] != 3 and
       contains({{1, 1}}, conv_op.stride))
        return false;
    return contains({{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 2, 2, 2}}, conv_op.padding) and
           contains({{0, 0}, {1, 1}}, conv_op.stride) and contains({{1, 1}}, conv_op.dilation);
}

void move_broadcasted_back(std::vector<instruction_ref>& args)
{
    // Ensure the last argument is the broadcasted one
    auto last = std::prev(args.end());
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().broadcasted(); });
    if(it != last)
        std::swap(*it, *std::prev(last));
}

void move_standard_front(std::vector<instruction_ref>& args)
{
    // Ensure the first argument is the standard one
    auto last = std::prev(args.end());
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().standard(); });
    if(it != last)
        std::swap(*it, args.front());
}

auto gpu_name(const std::string& s) { return match::name("gpu::" + s); }

namespace {
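// Generic MIOpen fused operator: a list of fusable ops (convolution, add,
// relu) lowered to a single MIOpen fusion plan at compile/finalize time.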
struct miopen_fusion
{
    struct fuse_op_data
    {
        operation op;
        float alpha = 1;
        float beta  = 0;
    };
    struct fuse_op : fuse_op_data, reflect_equality<fuse_op>, reflect_stream<fuse_op>
    {
        template <class Self, class F>
        static auto reflect(Self& self, F f)
        {
            return pack(f(self.op, "op"), f(self.alpha, "alpha"), f(self.beta, "beta"));
        }
    };
    std::vector<fuse_op> ops = {};
    fusion f                 = {};
    std::function<void(context&, const fusion&, const std::vector<argument>&)> execute;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.ops, "ops"));
    }

    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }

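    // Builds the fusion plan from `ops`, recording one invoker per op to set
    // its runtime arguments. Returns an empty value when an op is unsupported
    // or the plan does not compile, so callers can skip the rewrite.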
    value compile(context& ctx, const shape&, std::vector<shape> inputs)
    {
        // Compensate for allocation
        inputs.pop_back();
        std::size_t i = 0;
        f             = fusion(inputs[i]);
        i++;
        std::vector<std::function<void(const fused_operator_args&, const std::vector<argument>&)>>
            invokers;
        for(auto&& fop : ops)
        {
            if(i > inputs.size())
            {
                f = {};
                return {};
            }
            if(fop.op.name() == "convolution")
            {
                auto* mop = f.create_conv(any_cast<op::convolution>(fop.op), inputs[i]);
                invokers.push_back(
                    [=](const fused_operator_args& fargs, const std::vector<argument>& args) {
                        miopenSetOpArgsConvForward(
                            fargs.get(), mop, &fop.alpha, &fop.beta, args[i].implicit());
                    });
                i++;
            }
            else if(fop.op.name() == "add")
            {
                auto* mop = f.create_bias(inputs[i]);
                invokers.push_back(
                    [=](const fused_operator_args& fargs, const std::vector<argument>& args) {
                        miopenSetOpArgsBiasForward(
                            fargs.get(), mop, &fop.alpha, &fop.beta, args[i].implicit());
                    });
                i++;
            }
            else if(fop.op.name() == "relu")
            {
                auto* mop = f.create_relu();
                invokers.push_back([=](const fused_operator_args& fargs,
                                       const std::vector<argument>&) {
                    miopenSetOpArgsActivForward(fargs.get(), mop, &fop.alpha, &fop.beta, 0, 0, 0);
                });
            }
            else
            {
                f = {};
                return {};
            }
        }
        if(not f.compile(ctx))
        {
            f = {};
            return {};
        }
        execute = [invokers](context& c, const fusion& ff, const std::vector<argument>& args) {
            auto fargs = make_fused_args();
            for(auto&& invoker : invokers)
                invoker(fargs, args);
            ff.execute(c, fargs, args.front(), args.back());
        };
        return {{"workspace", f.get_workspace(ctx).bytes()}};
    }
    void finalize(context& ctx, const shape& output_shape, const std::vector<shape>& inputs)
    {
        if(not f.empty())
            return;
        auto v = compile(ctx, output_shape, inputs);
        if(not v.is_object())
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }
    std::string name() const { return "gpu::miopen_fusion"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        if(ops.empty())
            return {};
        // TODO: Check number of arguments
        return ops.front().op.compute_shape({inputs[0], inputs[1]});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        execute(ctx, f, args);
        return args.back();
    }
};
MIGRAPHX_REGISTER_OP(miopen_fusion)

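// Fused convolution + bias with the argument order
// (x, weights, workspace, bias, output).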
struct miopen_conv_bias
{
    op::convolution op;
    fusion fp         = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        return fp.execute(ctx, fargs, args[0], args[4]);
    }

    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        fp   = fusion(inputs[0]);
        conv = fp.create_conv(op, inputs[1]);
        bias = fp.create_bias(inputs[3]);
        if(not fp.compile(ctx))
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }

    shape get_workspace(context& ctx) { return fp.get_workspace(ctx); }

    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias)

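// Same as miopen_conv_bias, with a trailing ReLU activation.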
struct miopen_conv_bias_relu
{
    op::convolution op;
    fusion fp         = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};
    fusion::op_t relu = {};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
        return fp.execute(ctx, fargs, args[0], args[4]);
    }

    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        fp   = fusion(inputs[0]);
        conv = fp.create_conv(op, inputs[1]);
        bias = fp.create_bias(inputs[3]);
        relu = fp.create_relu();
        fp.compile(ctx);
    }

    shape get_workspace(context& ctx) { return fp.get_workspace(ctx); }

    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias_relu)

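// Matches a gpu::add of a fusable convolution and a per-channel bias
// broadcast, in either argument order.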
template <class... Ms>
auto conv_bias(Ms... ms)
{
    return match::name("gpu::add")(
        match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                fusable_conv(match::used_once()).bind("conv")),
        ms...);
}

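// Rewrites the matched add(conv, bias) into the fused operator Op, reusing
// the convolution's workspace and the add's output allocation.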
template <class Op>
void apply_conv_bias(context& ctx, module& m, const match::matcher_result& r)
{
    auto conv_ins    = r.instructions["conv"];
    auto bias_ins    = r.instructions["bias"];
    auto ins         = r.result;
    auto input_ins   = conv_ins->inputs().at(0);
    auto weights_ins = conv_ins->inputs().at(1);
    auto conv_op     = from_value<op::convolution>((conv_ins->get_operator()).to_value()["op"]);
    auto alloc_ins   = ins->inputs().back();
    auto old_ws_ins  = conv_ins->inputs().at(2);

    Op cb{conv_op};
    // TODO: Insert ws allocation
    auto ws = cb.get_workspace(ctx);
    (void)ws;
    m.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
}

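// Matches a gpu::precompile_op whose wrapped operator has one of the given
// names.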
template <class... Strings>
inline auto precompile_name(Strings... names) // NOLINT
{
    return match::make_basic_pred_matcher([=](instruction_ref ins) {
        if(ins->name() != "gpu::precompile_op")
            return false;
        auto op = from_value<operation>(ins->get_operator().to_value().at("op"));
        return (contains({names...}, op.name()));
    });
}

struct find_conv_bias
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return conv_bias(match::none_of(
            match::output(match::name(std::unordered_set<std::string>{"gpu::relu"}))));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        apply_conv_bias<miopen_conv_bias>(*ctx, m, r);
    }
};

struct find_conv_bias_relu
{
    context* ctx = nullptr;
    auto matcher() const { return match::name("gpu::relu")(match::arg(0)(conv_bias())); }

    void apply(module& m, const match::matcher_result& r) const
    {
        apply_conv_bias<miopen_conv_bias_relu>(*ctx, m, r);
    }
};

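// Fuses a fusable convolution followed by a pointwise bias module into a
// miopen_fusion op; the rewrite is applied only if the plan compiles.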
struct find_conv_pointwise
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return precompile_name("pointwise")(
            match::nargs(3),
            match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                    fusable_conv(match::used_once()).bind("conv")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto conv_ins    = r.instructions["conv"];
        auto bias_ins    = r.instructions["bias"];
        auto ins         = r.result;
        auto input_ins   = conv_ins->inputs().at(0);
        auto weights_ins = conv_ins->inputs().at(1);
        auto conv_op     = from_value<op::convolution>(conv_ins->get_operator().to_value()["op"]);
        auto alloc_ins   = ins->inputs().back();

        module_ref pm = ins->module_inputs().front();

        miopen_fusion op{};
        op.ops.push_back({{conv_op}});
        for(auto&& i : *pm)
        {
            if(i.name()[0] == '@')
                continue;
            op.ops.push_back({{i.get_operator()}});
        }
        std::vector<instruction_ref> inputs = {input_ins, weights_ins, bias_ins, alloc_ins};
        auto v                              = op.compile(*ctx, ins->get_shape(), to_shapes(inputs));
        if(not v.is_object())
            return;
        m.replace_instruction(ins, op, inputs);
    }
};

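// Folds a pointwise op following a gemm into the gemm itself by adjusting the
// rocBLAS alpha/beta scalars (and the C operand for the three-input case).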
struct find_gemm_pointwise
{
    auto matcher() const
    {
        auto gemm_op   = match::name("gpu::gemm")(match::nargs(3), match::used_once()).bind("gemm");
        auto binary_op = match::all_of(
            match::nargs(3),
            match::either_arg(0, 1)(
                match::any_of(match::standard_shape(), match::is_constant()).bind("c"), gemm_op));
        auto unary_op = match::all_of(match::nargs(2), match::arg(0)(gemm_op));
        return precompile_name("pointwise")(match::any_of(binary_op, unary_op));
    }

    // TODO: Move to matcher.hpp
    static auto match_param(const std::string& name)
    {
        return match::make_basic_pred_matcher([=](auto ins) {
            if(ins->name() != "@param")
                return false;
            auto p = any_cast<builtin::param>(ins->get_operator());
            return p.parameter == name;
        });
    }

    template <class M>
    static auto match_mul_const(M m, const std::string& var)
    {
        return match::name("mul")(match::either_arg(0, 1)(match::name("@literal").bind(var), m))
            .bind(var + "_mul");
    }

    static auto match_add(const std::string& input, const std::string& output)
    {
        auto param     = match::name("@param");
        auto add       = match::name("add")(match::args(param, param));
        auto inner_mul = match::any_of(match_mul_const(match_param(input), "alpha"),
                                       match_mul_const(match_param(output), "beta"));
        auto mul_add   = match::name("add")(match::either_arg(0, 1)(inner_mul, param));
        auto add_mul   = match_mul_const(add, "gamma");
        return match::name("@return")(match::args(match::any_of(add, mul_add, add_mul)));
    }

    static auto match_mul(const std::string& input)
    {
        auto mul = match_mul_const(match_param(input), "alpha");
        return match::name("@return")(match::args(mul));
    }

    static float get_float(instruction_ref ins) { return ins->get_literal().at<float>(); }

    template <class Gemm>
    static bool update_gemm(Gemm& gemm, module_ref pm, unsigned input)
    {
        auto names = pm->get_parameter_names();
        std::sort(names.begin(), names.end());
        if(names.size() == 1)
        {
            auto mr = match::match_instruction(*pm, std::prev(pm->end()), match_mul(names[input]));
            if(mr.result == pm->end())
                return false;
            gemm.alpha *= get_float(mr.instructions["alpha"]);
            return true;
        }
        else if(names.size() == 2)
        {
            unsigned output = input == 0 ? 1 : 0;
            auto mr         = match::match_instruction(
                *pm, std::prev(pm->end()), match_add(names[input], names[output]));
            if(mr.result == pm->end())
                return false;
            if(contains(mr.instructions, "alpha_mul"))
                gemm.alpha *= get_float(mr.instructions["alpha"]);
            else if(contains(mr.instructions, "beta_mul"))
                gemm.beta *= get_float(mr.instructions["beta"]);
            else if(contains(mr.instructions, "gamma_mul"))
            {
                gemm.alpha *= get_float(mr.instructions["gamma"]);
                gemm.beta *= get_float(mr.instructions["gamma"]);
            }
            return true;
        }
        else
        {
            return false;
        }
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins      = r.result;
        auto gemm_ins = r.instructions["gemm"];

        auto gemm = any_cast<rocblas_gemm<op::dot>>(gemm_ins->get_operator());

        // Already fused gemm
        if(not float_equal(gemm.beta, 0))
            return;
        if(ins->inputs().size() == 3)
            gemm.beta = 1;

        if(not update_gemm(
               gemm, ins->module_inputs().front(), ins->inputs().front() == gemm_ins ? 0 : 1))
            return;

        auto inputs = gemm_ins->inputs();
        inputs.pop_back();

        if(ins->inputs().size() == 3)
        {
            auto c_ins = r.instructions["c"];
            // const-fold input if not standard shape since rocblas can't handle it
            if(not c_ins->get_shape().standard())
            {
                auto c = make_op("contiguous");
                auto l = c.compute(c.compute_shape({c_ins->get_shape()}), {c_ins->eval()});
                c_ins  = m.add_literal(l.get_shape(), l.data());
            }
            inputs.push_back(c_ins);
        }

        inputs.push_back(ins->inputs().back());

        m.replace_instruction(ins, gemm, inputs);
    }
};

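// Rewrites gemm -> transpose -> contiguous so the gemm writes directly into a
// transposed allocation (via trans_batch), eliminating the copy.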
struct find_contiguous_transpose_gemm
{
    auto matcher() const
    {
        return match::name("gpu::contiguous")(match::arg(0)(
            match::name("transpose")(
                match::arg(0)(match::name("gpu::gemm")(match::used_once()).bind("gemm")))
                .bind("transpose")));
    }

    template <class Vector>
    static bool is_swapped(const Vector& perm, std::size_t i, std::size_t j)
    {
        if(i >= perm.size() or j >= perm.size())
            return false;
        auto perm2 = perm;
        std::iota(perm2.begin(), perm2.end(), 0);
        std::swap(perm2[i], perm2[j]);
        return perm2 == perm;
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm      = r.instructions["gemm"];
        auto alloc     = gemm->inputs().back();
        auto transpose = r.instructions["transpose"];
        auto perm      = transpose->get_operator().to_value()["permutation"].to_vector<int64_t>();
        auto iperm     = invert_permutation(perm);

        if(perm.size() < 3)
            return;

        if(not is_swapped(perm, perm.size() - 3, perm.size() - 2))
            return;

        auto lens = gemm->get_shape().lens();
        if(lens.size() > 3 and
           not std::all_of(lens.begin(), lens.end() - 3, [](auto i) { return i == 1; }))
            return;

        auto gemmv           = gemm->get_operator().to_value();
        gemmv["trans_batch"] = 1;

        auto s = shape{alloc->get_shape().type(), reorder_dims(alloc->get_shape().lens(), iperm)};
        auto new_alloc = m.insert_instruction(gemm, make_op("allocate", {{"shape", to_value(s)}}));
        auto alloc_transpose =
            m.insert_instruction(gemm, make_op("transpose", {{"permutation", perm}}), new_alloc);

        auto inputs         = gemm->inputs();
        inputs.back()       = alloc_transpose;
        auto new_gemm       = m.insert_instruction(gemm, make_op("gpu::gemm", gemmv), inputs);
        auto gemm_transpose = m.insert_instruction(gemm, transpose->get_operator(), new_gemm);

        m.replace_instruction(ins, gemm_transpose);
    }
};

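// For commutative ops, moves a broadcast input behind the non-broadcast ones
// (just before the output allocation).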
struct find_commutative_broadcast
{
    auto matcher() const
    {
        return match::name("gpu::add", "gpu::mul")(match::arg(1)(match::broadcast_shape()));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins  = r.result;
        auto args = ins->inputs();
        move_broadcasted_back(args);

        m.replace_instruction(ins, ins->get_operator(), args);
    }
};
} // namespace

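// Wraps any remaining gpu::contiguous in a precompile_op so it is
// code-generated later.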
struct find_contiguous
{
    auto matcher() const { return match::name("gpu::contiguous"); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins = r.result;

        m.replace_instruction(
            ins,
            make_op("gpu::precompile_op", {{"op", to_value(make_op("contiguous"))}}),
            ins->inputs());
    }
};

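// Folds a contiguous following a pointwise op into the pointwise op by
// redirecting its output to the contiguous' allocation.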
struct find_contiguous_pointwise
{
    auto matcher() const
    {
        return match::name("gpu::contiguous")(match::arg(0)(precompile_name("pointwise")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins    = r.result;
        auto pw     = ins->inputs().front();
        auto alloc  = ins->inputs().back();
        auto args   = pw->inputs();
        args.back() = alloc;

        m.replace_instruction(ins, pw->get_operator(), args, pw->module_inputs());
    }
};

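// Fuses a pointwise module consuming a layernorm output into the layernorm
// kernel by passing the pointwise module along.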
struct find_layernorm_pointwise
{
    auto matcher() const
    {
        return precompile_name("pointwise")(match::arg(0)(
            precompile_name("gpu::prelayernorm", "gpu::preadd_layernorm").bind("layernorm")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto layernorm = r.instructions["layernorm"];
        if(not layernorm->module_inputs().empty())
            return;
        auto* pm    = ins->module_inputs().front();
        auto inputs = layernorm->inputs();
        inputs.pop_back();
        inputs.insert(inputs.end(), ins->inputs().begin() + 1, ins->inputs().end());

        m.replace_instruction(ins, layernorm->get_operator(), inputs, {pm});
    }
};

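// Fuses a pointwise module consuming a concat output into the precompiled
// concat op.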
struct find_concat_pointwise
{
    auto matcher() const
    {
        return precompile_name("pointwise")(
            match::arg(0)(precompile_name("concat").bind("concat")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins    = r.result;
        auto concat = r.instructions["concat"];
        if(not concat->module_inputs().empty())
            return;

        // TODO: Handle type conversions
        if(ins->get_shape().type() != concat->get_shape().type())
            return;

        auto* pm    = ins->module_inputs().front();
        auto inputs = concat->inputs();
        inputs.pop_back();
        inputs.insert(inputs.end(), ins->inputs().begin() + 1, ins->inputs().end());

        auto op = concat->get_operator();
        op.from_value({{"additional_args", ins->inputs().size() - 1}, {"ignore_modules", true}});

        m.replace_instruction(ins, op, inputs, {pm});
    }
};

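// The matchers run in stages with dead_code_elimination in between so that
// later matchers see the already-simplified graph.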
void fuse_ops::apply(module& m) const
{
    match::find_matches(m, find_contiguous_pointwise{});
    run_passes(m, {dead_code_elimination{}});
    match::find_matches(m, find_conv_pointwise{ctx}, find_conv_bias_relu{ctx}, find_conv_bias{ctx});
    run_passes(m, {dead_code_elimination{}});
    match::find_matches(m,
                        find_layernorm_pointwise{},
                        find_concat_pointwise{},
                        find_gemm_pointwise{},
                        find_contiguous_transpose_gemm{},
                        find_commutative_broadcast{});
    match::find_matches(m, find_contiguous{});
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx