/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/gpu/fuse_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/array.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/make_op.hpp>
#include <cmath>
#include <set>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_MIOPEN_FUSION)

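// Thin RAII-style wrapper around MIOpen's fusion-plan API: a plan is created
// against the input tensor shape, fusable operators (conv, bias, relu) are
// appended to it, and the plan is then compiled and executed as a single
// fused kernel. Typical usage, as in miopen_conv_bias::finalize below:
//
//     fusion fp{input_shape};
//     auto conv = fp.create_conv(conv_op, weights_shape);
//     auto bias = fp.create_bias(bias_shape);
//     if(not fp.compile(ctx))
//         MIGRAPHX_THROW("Failed to compile fusion plan");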
struct fusion
{
    using op_t = miopenFusionOpDescriptor_t;
    shared<fusion_plan_descriptor> fp;

    // Used as a temporary hack to keep descriptor references alive
    std::vector<std::shared_ptr<void>> storage;

    template <class T>
    auto keep_alive(T x)
    {
        auto result = share(std::move(x));
        storage.push_back(result);
        return result;
    }

    fusion() = default;

    fusion(const shape& input)
    {
        assert(input.standard());
        auto t = make_tensor(input);
        fp     = make_fusion_plan(t);
        assert(fp);
        keep_alive(std::move(t));
    }

    bool empty() const { return fp == nullptr; }

    op_t operator[](std::size_t i) const
    {
        assert(fp);
        op_t result;
        auto status = miopenFusionPlanGetOp(fp.get(), i, &result);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed retrieving operator at " + std::to_string(i));
        return result;
    }

    auto get() const
    {
        assert(fp);
        return fp.get();
    }

    op_t create_bias(const shape& bias)
    {
        assert(fp);
        op_t result;
        auto b      = shape{bias.type(), {1, bias.lens().at(1), 1, 1}};
        auto t      = keep_alive(make_tensor(b));
        auto status = miopenCreateOpBiasForward(fp.get(), &result, t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    op_t create_relu()
    {
        assert(fp);
        op_t result;
        auto status = miopenCreateOpActivationForward(fp.get(), &result, miopenActivationRELU);
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    op_t create_conv(const op::convolution& op, const shape& weights)
    {
        assert(fp);
        op_t result;
        auto cd     = keep_alive(make_conv(op));
        auto t      = keep_alive(make_tensor(weights));
        auto status = miopenCreateOpConvForward(fp.get(), &result, cd.get(), t.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Creating operator failed");
        return result;
    }

    shape get_workspace(context&)
    {
        // assert(fp);
        // TODO: Use zero workspace for now
        std::size_t ws_size = 0;
        // int algo_count = 1;
        // miopenConvFwdAlgorithm_t algo;
        // miopenFusionPlanConvolutionGetAlgo(fp.get(), 1, &algo_count, &algo);
        // miopenFusionPlanGetWorkSpaceSize(ctx.get_stream().get_miopen(), fp.get(), &ws_size,
        // algo);
        return shape{shape::int8_type, {ws_size}};
    }

    bool compile(context& ctx)
    {
        assert(fp);
        return miopenCompileFusionPlan(ctx.get_stream().get_miopen(), fp.get()) ==
               miopenStatusSuccess;
    }

    argument execute(context& ctx,
                     const fused_operator_args& fargs,
                     const argument& x,
                     const argument& y) const
    {
        assert(fp);
        auto x_td   = make_tensor(x.get_shape());
        auto y_td   = make_tensor(y.get_shape());
        auto status = miopenExecuteFusionPlan(ctx.get_stream().get_miopen(),
                                              fp.get(),
                                              x_td.get(),
                                              x.implicit(),
                                              y_td.get(),
                                              y.implicit(),
                                              fargs.get());
        if(status != miopenStatusSuccess)
            MIGRAPHX_THROW("Failed to execute fusion plan");
        return y;
    }
};

const std::unordered_set<std::string>& get_supported_archs()
{
    static std::unordered_set<std::string> supported_archs{
        "gfx900", "gfx906", "gfx908", "gfx1030", "gfx940"};
    return supported_archs;
}

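// Matches a bias laid out for NCHW: a 4-d broadcasted shape whose only
// nonzero stride is on the channel dimension, i.e. a value that varies per
// output channel only.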
MIGRAPHX_PRED_MATCHER(bias_shape, instruction_ref ins)
{
    auto&& s = ins->get_shape();
    return s.broadcasted() and s.strides().size() == 4 and s.strides()[0] == 0 and
           s.strides()[1] != 0 and s.strides()[2] == 0 and s.strides()[3] == 0;
}

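// Whitelist of convolutions eligible for the MIOpen fusion path: float,
// non-grouped gpu::convolution on a supported arch, with square filters and
// inputs and a small set of padding/stride/dilation configurations. The
// restrictions mirror what the fused MIOpen kernels are known to support.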
MIGRAPHX_PRED_MATCHER(fusable_conv, instruction_ref ins)
{
    const auto device_name = trim(split_string(get_device_name(), ':').front());
    if(not contains(get_supported_archs(), device_name))
        return false;
    if(enabled(MIGRAPHX_DISABLE_MIOPEN_FUSION{}))
        return false;
    if(ins->name() != "gpu::convolution")
        return false;
    if(ins->get_shape().type() != shape::float_type)
        return false;
    auto wei = ins->inputs().at(1)->get_shape();
    assert(wei.lens().size() == 4);
    auto miopen_conv_op = ins->get_operator().to_value();
    auto algo           = miopen_conv_op.at("algo").to<miopenConvFwdAlgorithm_t>();
    auto conv_op        = from_value<op::convolution>(miopen_conv_op["op"]);
    if(conv_op.group > 1)
        return false;
    if(wei.lens()[1] > 512 and algo != miopenConvolutionFwdAlgoWinograd)
        return false;

    // Do not fuse non-symmetric input
    auto input_lens = ins->inputs().at(0)->get_shape().lens();
    if(input_lens[2] != input_lens[3] or wei.lens()[2] != wei.lens()[3])
        return false;

    // Don't fuse Winograd for non-3x3 filters since there is no fused Winograd kernel for those configs
    if(algo == miopenConvolutionFwdAlgoWinograd and wei.lens()[2] != 3 and wei.lens()[3] != 3 and
       contains({{1, 1}}, conv_op.stride))
        return false;
    return contains({{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 2, 2, 2}}, conv_op.padding) and
           contains({{0, 0}, {1, 1}}, conv_op.stride) and contains({{1, 1}}, conv_op.dilation);
}

void move_broadcasted_back(std::vector<instruction_ref>& args)
{
    // Ensure the last argument is the broadcasted one
    auto last = std::prev(args.end());
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().broadcasted(); });
    if(it != last)
        std::swap(*it, *std::prev(last));
}

void move_standard_front(std::vector<instruction_ref>& args)
{
    // Ensure the first argument is the standard one
    auto last = std::prev(args.end());
    auto it =
        std::find_if(args.begin(), last, [](auto arg) { return arg->get_shape().standard(); });
    if(it != last)
        std::swap(*it, args.front());
}

auto gpu_name(const std::string& s) { return match::name("gpu::" + s); }

namespace {
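
// Generic MIOpen fused operator: `ops` records the fused sequence
// (convolution, add, relu) along with per-op alpha/beta scales. compile()
// builds the fusion plan from the input shapes and captures one argument
// setter per op in the `execute` closure; finalize() compiles on demand and
// throws when the plan is unsupported.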
struct miopen_fusion
{
    struct fuse_op_data
    {
        operation op;
        float alpha = 1;
        float beta  = 0;
    };
    struct fuse_op : fuse_op_data, reflect_equality<fuse_op>, reflect_stream<fuse_op>
    {
        template <class Self, class F>
        static auto reflect(Self& self, F f)
        {
            return pack(f(self.op, "op"), f(self.alpha, "alpha"), f(self.beta, "beta"));
        }
    };
    std::vector<fuse_op> ops = {};
    fusion f                 = {};
    std::function<void(context&, const fusion&, const std::vector<argument>&)> execute;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.ops, "ops"));
    }

    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }

    value compile(context& ctx, const shape&, std::vector<shape> inputs)
    {
        // Compensate for allocation
        inputs.pop_back();
        std::size_t i = 0;
        f             = fusion(inputs[i]);
        i++;
        std::vector<std::function<void(const fused_operator_args&, const std::vector<argument>&)>>
            invokers;
        for(auto&& fop : ops)
        {
            if(i > inputs.size())
            {
                f = {};
                return {};
            }
            if(fop.op.name() == "convolution")
            {
                auto* mop = f.create_conv(any_cast<op::convolution>(fop.op), inputs[i]);
                invokers.push_back(
                    [=](const fused_operator_args& fargs, const std::vector<argument>& args) {
                        miopenSetOpArgsConvForward(
                            fargs.get(), mop, &fop.alpha, &fop.beta, args[i].implicit());
                    });
                i++;
            }
            else if(fop.op.name() == "add")
            {
                auto* mop = f.create_bias(inputs[i]);
                invokers.push_back(
                    [=](const fused_operator_args& fargs, const std::vector<argument>& args) {
                        miopenSetOpArgsBiasForward(
                            fargs.get(), mop, &fop.alpha, &fop.beta, args[i].implicit());
                    });
                i++;
            }
            else if(fop.op.name() == "relu")
            {
                auto* mop = f.create_relu();
                invokers.push_back([=](const fused_operator_args& fargs,
                                       const std::vector<argument>&) {
                    miopenSetOpArgsActivForward(fargs.get(), mop, &fop.alpha, &fop.beta, 0, 0, 0);
                });
            }
            else
            {
                f = {};
                return {};
            }
        }
        if(not f.compile(ctx))
        {
            f = {};
            return {};
        }
        execute = [invokers](context& c, const fusion& ff, const std::vector<argument>& args) {
            auto fargs = make_fused_args();
            for(auto&& invoker : invokers)
                invoker(fargs, args);
            ff.execute(c, fargs, args.front(), args.back());
        };
        return {{"workspace", f.get_workspace(ctx).bytes()}};
    }
    void finalize(context& ctx, const shape& output_shape, const std::vector<shape>& inputs)
    {
        if(not f.empty())
            return;
        auto v = compile(ctx, output_shape, inputs);
        if(not v.is_object())
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }
    std::string name() const { return "gpu::miopen_fusion"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        if(ops.empty())
            return {};
        // TODO: Check number of arguments
        return ops.front().op.compute_shape({inputs[0], inputs[1]});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        execute(ctx, f, args);
        return args.back();
    }
};
MIGRAPHX_REGISTER_OP(miopen_fusion)

struct miopen_conv_bias
{
    op::convolution op;
    fusion fp         = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        return fp.execute(ctx, fargs, args[0], args[4]);
    }

    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        fp   = fusion(inputs[0]);
        conv = fp.create_conv(op, inputs[1]);
        bias = fp.create_bias(inputs[3]);
        if(not fp.compile(ctx))
            MIGRAPHX_THROW("Failed to compile fusion plan");
    }

    shape get_workspace(context& ctx) { return fp.get_workspace(ctx); }
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias)

struct miopen_conv_bias_relu
{
    op::convolution op;
    fusion fp         = {};
    fusion::op_t conv = {};
    fusion::op_t bias = {};
    fusion::op_t relu = {};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return op::convolution::reflect(self.op, f);
    }

    std::string name() const { return "gpu::conv_bias_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        auto fargs  = make_fused_args();
        float alpha = 1;
        float beta  = 0;
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        miopenSetOpArgsActivForward(fargs.get(), relu, &alpha, &beta, 0, 0, 0);
        return fp.execute(ctx, fargs, args[0], args[4]);
    }
    void finalize(context& ctx, const shape&, const std::vector<shape>& inputs)
    {
        fp   = fusion(inputs[0]);
        conv = fp.create_conv(op, inputs[1]);
        bias = fp.create_bias(inputs[3]);
        relu = fp.create_relu();
        fp.compile(ctx);
    }

    shape get_workspace(context& ctx) { return fp.get_workspace(ctx); }
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};
MIGRAPHX_REGISTER_OP(miopen_conv_bias_relu)

template <class... Ms>
auto conv_bias(Ms... ms)
{
    return match::name("gpu::add")(
        match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                fusable_conv(match::used_once()).bind("conv")),
        ms...);
}

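// Replaces the matched add(conv, bias) with a fused op. The fused op takes
// five inputs: (input, weights, workspace, bias, output allocation), the
// layout that miopen_conv_bias::compute_shape checks and compute() indexes.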
template <class Op>
void apply_conv_bias(context& ctx, module& m, const match::matcher_result& r)
{
    auto conv_ins    = r.instructions["conv"];
    auto bias_ins    = r.instructions["bias"];
    auto ins         = r.result;
    auto input_ins   = conv_ins->inputs().at(0);
    auto weights_ins = conv_ins->inputs().at(1);
    auto conv_op     = from_value<op::convolution>((conv_ins->get_operator()).to_value()["op"]);
    auto alloc_ins   = ins->inputs().back();
    auto old_ws_ins  = conv_ins->inputs().at(2);

    Op cb{conv_op};
    // TODO: Insert ws allocation
    auto ws = cb.get_workspace(ctx);
    (void)ws;
    m.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
}

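// Matches a gpu::precompile_op whose wrapped operator has one of the given
// names.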
template <class... Strings>
inline auto precompile_name(Strings... names) // NOLINT
{
    return match::make_basic_pred_matcher([=](instruction_ref ins) {
        if(ins->name() != "gpu::precompile_op")
            return false;
        auto op = from_value<operation>(ins->get_operator().to_value().at("op"));
        return (contains({names...}, op.name()));
    });
}

struct find_conv_bias
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return conv_bias(match::none_of(
            match::output(match::name(std::unordered_set<std::string>{"gpu::relu"}))));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        apply_conv_bias<miopen_conv_bias>(*ctx, m, r);
    }
};

struct find_conv_bias_relu
{
    context* ctx = nullptr;
    auto matcher() const { return match::name("gpu::relu")(match::arg(0)(conv_bias())); }

    void apply(module& m, const match::matcher_result& r) const
    {
        apply_conv_bias<miopen_conv_bias_relu>(*ctx, m, r);
    }
};

struct find_conv_pointwise
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return precompile_name("pointwise")(
            match::nargs(3),
            match::either_arg(0, 1)(bias_shape(match::used_once()).bind("bias"),
                                    fusable_conv(match::used_once()).bind("conv")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto conv_ins    = r.instructions["conv"];
        auto bias_ins    = r.instructions["bias"];
        auto ins         = r.result;
        auto input_ins   = conv_ins->inputs().at(0);
        auto weights_ins = conv_ins->inputs().at(1);
        auto conv_op     = from_value<op::convolution>(conv_ins->get_operator().to_value()["op"]);
        auto alloc_ins   = ins->inputs().back();

        module_ref pm = ins->module_inputs().front();

        miopen_fusion op{};
        op.ops.push_back({{conv_op}});
        for(auto&& i : *pm)
        {
            if(i.name()[0] == '@')
                continue;
            op.ops.push_back({{i.get_operator()}});
        }
        std::vector<instruction_ref> inputs = {input_ins, weights_ins, bias_ins, alloc_ins};
        auto v                              = op.compile(*ctx, ins->get_shape(), to_shapes(inputs));
        if(not v.is_object())
            return;
        m.replace_instruction(ins, op, inputs);
    }
};

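// Folds a trailing pointwise module into the rocBLAS gemm call: a multiply by
// a literal becomes the gemm alpha (or beta when it scales the C input), and
// a binary add supplies the C matrix with beta = 1. Only applies when the
// gemm has not already been fused (beta == 0).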
struct find_gemm_pointwise
{
    auto matcher() const
    {
        auto gemm_op   = match::name("gpu::gemm")(match::nargs(3), match::used_once()).bind("gemm");
        auto binary_op = match::all_of(
            match::nargs(3),
            match::either_arg(0, 1)(
                match::any_of(match::standard_shape(), match::is_constant()).bind("c"), gemm_op));
        auto unary_op = match::all_of(match::nargs(2), match::arg(0)(gemm_op));
        return precompile_name("pointwise")(match::any_of(binary_op, unary_op));
    }

    // TODO: Move to matcher.hpp
    static auto match_param(const std::string& name)
    {
        return match::make_basic_pred_matcher([=](auto ins) {
            if(ins->name() != "@param")
                return false;
            auto p = any_cast<builtin::param>(ins->get_operator());
            return p.parameter == name;
        });
    }

    template <class M>
    static auto match_mul_const(M m, const std::string& var)
    {
        return match::name("mul")(match::either_arg(0, 1)(match::name("@literal").bind(var), m))
            .bind(var + "_mul");
    }

    static auto match_add(const std::string& input, const std::string& output)
    {
        auto param     = match::name("@param");
        auto add       = match::name("add")(match::args(param, param));
        auto inner_mul = match::any_of(match_mul_const(match_param(input), "alpha"),
                                       match_mul_const(match_param(output), "beta"));
        auto mul_add   = match::name("add")(match::either_arg(0, 1)(inner_mul, param));
        auto add_mul   = match_mul_const(add, "gamma");
        return match::name("@return")(match::args(match::any_of(add, mul_add, add_mul)));
    }

    static auto match_mul(const std::string& input)
    {
        auto mul = match_mul_const(match_param(input), "alpha");
        return match::name("@return")(match::args(mul));
    }

    static float get_float(instruction_ref ins) { return ins->get_literal().at<float>(); }

    template <class Gemm>
    static bool update_gemm(Gemm& gemm, module_ref pm, unsigned input)
    {
        auto names = pm->get_parameter_names();
        std::sort(names.begin(), names.end());
        if(names.size() == 1)
        {
            auto mr = match::match_instruction(*pm, std::prev(pm->end()), match_mul(names[input]));
            if(mr.result == pm->end())
                return false;
            gemm.alpha *= get_float(mr.instructions["alpha"]);
            return true;
        }
        else if(names.size() == 2)
        {
            unsigned output = input == 0 ? 1 : 0;
            auto mr         = match::match_instruction(
                *pm, std::prev(pm->end()), match_add(names[input], names[output]));
            if(mr.result == pm->end())
                return false;
            if(contains(mr.instructions, "alpha_mul"))
                gemm.alpha *= get_float(mr.instructions["alpha"]);
            else if(contains(mr.instructions, "beta_mul"))
                gemm.beta *= get_float(mr.instructions["beta"]);
            else if(contains(mr.instructions, "gamma_mul"))
            {
                gemm.alpha *= get_float(mr.instructions["gamma"]);
                gemm.beta *= get_float(mr.instructions["gamma"]);
            }
            return true;
        }
        else
        {
            return false;
        }
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins      = r.result;
        auto gemm_ins = r.instructions["gemm"];

        auto gemm = any_cast<rocblas_gemm<op::dot>>(gemm_ins->get_operator());

        // Already fused gemm
        if(not float_equal(gemm.beta, 0))
            return;
        if(ins->inputs().size() == 3)
            gemm.beta = 1;

        if(not update_gemm(
               gemm, ins->module_inputs().front(), ins->inputs().front() == gemm_ins ? 0 : 1))
            return;

        auto inputs = gemm_ins->inputs();
        inputs.pop_back();

        if(ins->inputs().size() == 3)
        {
            auto c_ins = r.instructions["c"];
            // const-fold input if not standard shape since rocblas can't handle it
            if(not c_ins->get_shape().standard())
            {
                auto c = make_op("contiguous");
                auto l = c.compute(c.compute_shape({c_ins->get_shape()}), {c_ins->eval()});
                c_ins  = m.add_literal(l.get_shape(), l.data());
            }
            inputs.push_back(c_ins);
        }

        inputs.push_back(ins->inputs().back());

        m.replace_instruction(ins, gemm, inputs);
    }
};

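// Rewrites gemm -> transpose -> contiguous by allocating the output in the
// transposed layout and running the gemm with trans_batch set, so the final
// transpose is a zero-copy view and the gpu::contiguous copy is eliminated.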
struct find_contiguous_transpose_gemm
{
    auto matcher() const
    {
        return match::name("gpu::contiguous")(match::arg(0)(
            match::name("transpose")(
                match::arg(0)(match::name("gpu::gemm")(match::used_once()).bind("gemm")))
                .bind("transpose")));
    }

    template <class Vector>
    static bool is_swapped(const Vector& perm, std::size_t i, std::size_t j)
    {
        if(i >= perm.size() or j >= perm.size())
            return false;
        auto perm2 = perm;
        std::iota(perm2.begin(), perm2.end(), 0);
        std::swap(perm2[i], perm2[j]);
        return perm2 == perm;
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm      = r.instructions["gemm"];
        auto alloc     = gemm->inputs().back();
        auto transpose = r.instructions["transpose"];
        auto perm      = transpose->get_operator().to_value()["permutation"].to_vector<int64_t>();
        auto iperm     = invert_permutation(perm);

        if(perm.size() < 3)
            return;

        if(not is_swapped(perm, perm.size() - 3, perm.size() - 2))
            return;

        auto lens = gemm->get_shape().lens();
        if(lens.size() > 3 and
           not std::all_of(lens.begin(), lens.end() - 3, [](auto i) { return i == 1; }))
            return;

        auto gemmv           = gemm->get_operator().to_value();
        gemmv["trans_batch"] = 1;

        auto s = shape{alloc->get_shape().type(), reorder_dims(alloc->get_shape().lens(), iperm)};
        auto new_alloc = m.insert_instruction(gemm, make_op("allocate", {{"shape", to_value(s)}}));
        auto alloc_transpose =
            m.insert_instruction(gemm, make_op("transpose", {{"permutation", perm}}), new_alloc);

        auto inputs         = gemm->inputs();
        inputs.back()       = alloc_transpose;
        auto new_gemm       = m.insert_instruction(gemm, make_op("gpu::gemm", gemmv), inputs);
        auto gemm_transpose = m.insert_instruction(gemm, transpose->get_operator(), new_gemm);

        m.replace_instruction(ins, gemm_transpose);
    }
};

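// For commutative ops (add, mul), move the broadcast argument to the last
// position, normalizing the argument order for the binary kernels.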
struct find_commutative_broadcast
{
    auto matcher() const
    {
        return match::name("gpu::add", "gpu::mul")(match::arg(1)(match::broadcast_shape()));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins  = r.result;
        auto args = ins->inputs();
        move_broadcasted_back(args);

        m.replace_instruction(ins, ins->get_operator(), args);
    }
};
} // namespace

struct find_contiguous
{
    auto matcher() const { return match::name("gpu::contiguous"); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins = r.result;

        m.replace_instruction(
            ins,
            make_op("gpu::precompile_op", {{"op", to_value(make_op("contiguous"))}}),
            ins->inputs());
    }
};

struct find_contiguous_pointwise
{
    auto matcher() const
    {
        return match::name("gpu::contiguous")(match::arg(0)(precompile_name("pointwise")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins    = r.result;
        auto pw     = ins->inputs().front();
        auto alloc  = ins->inputs().back();
        auto args   = pw->inputs();
        args.back() = alloc;

        m.replace_instruction(ins, pw->get_operator(), args, pw->module_inputs());
    }
};

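// Fuses a pointwise consumer into a preceding gpu::prelayernorm /
// gpu::preadd_layernorm that has no module of its own yet: the pointwise
// module becomes the layernorm's module input, and the pointwise's remaining
// inputs are appended after the layernorm's own inputs.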
struct find_layernorm_pointwise
{
    auto matcher() const
    {
        return precompile_name("pointwise")(match::any_of[match::inputs()](
            precompile_name("gpu::prelayernorm", "gpu::preadd_layernorm").bind("layernorm")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto pw_ins    = r.result;
        auto layernorm = r.instructions["layernorm"];
        if(not layernorm->module_inputs().empty())
            return;
        auto* pm       = pw_ins->module_inputs().front();
        auto pw_inputs = pw_ins->inputs();
        auto ln_pos    = std::find(pw_inputs.begin(), pw_inputs.end(), layernorm);
        assert(ln_pos != pw_inputs.end());
        pw_inputs.erase(ln_pos);
        auto inputs = layernorm->inputs();
        inputs.pop_back();
        inputs.insert(inputs.end(), pw_inputs.begin(), pw_inputs.end());

        m.replace_instruction(pw_ins, layernorm->get_operator(), inputs, {pm});
    }
};

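// Fuses a pointwise consumer into a precompiled concat by attaching the
// pointwise module and passing its extra inputs through "additional_args".
// Type conversions are not handled yet, so both ops must produce the same
// type.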
struct find_concat_pointwise
{
    auto matcher() const
    {
        return precompile_name("pointwise")(
            match::arg(0)(precompile_name("concat").bind("concat")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins    = r.result;
        auto concat = r.instructions["concat"];
        if(not concat->module_inputs().empty())
            return;

        // TODO: Handle type conversions
        if(ins->get_shape().type() != concat->get_shape().type())
            return;

        auto* pm    = ins->module_inputs().front();
        auto inputs = concat->inputs();
        inputs.pop_back();
        inputs.insert(inputs.end(), ins->inputs().begin() + 1, ins->inputs().end());

        auto op = concat->get_operator();
        op.from_value({{"additional_args", ins->inputs().size() - 1}, {"ignore_modules", true}});

        m.replace_instruction(ins, op, inputs, {pm});
    }
};

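// The matchers run in rounds with dead-code elimination in between:
// contiguous+pointwise fusion first so later matchers see a canonical graph,
// then the MIOpen conv fusions, then the gemm/layernorm/concat/broadcast
// rewrites, and finally any remaining gpu::contiguous is lowered to a
// precompiled op.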
void fuse_ops::apply(module& m) const
{
    match::find_matches(m, find_contiguous_pointwise{});
    run_passes(m, {dead_code_elimination{}});
    match::find_matches(m, find_conv_pointwise{ctx}, find_conv_bias_relu{ctx}, find_conv_bias{ctx});
    run_passes(m, {dead_code_elimination{}});
    match::find_matches(m,
                        find_layernorm_pointwise{},
                        find_concat_pointwise{},
                        find_gemm_pointwise{},
                        find_contiguous_transpose_gemm{},
                        find_commutative_broadcast{});
    match::find_matches(m, find_contiguous{});
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx