#include <migraph/gpu/fuse_ops.hpp>
#include <migraph/matcher.hpp>
#include <migraph/gpu/miopen.hpp>
#include <migraph/gpu/convolution.hpp>
#include <migraph/gpu/device/add_relu.hpp>
#include <migraph/instruction.hpp>

namespace migraph {

namespace gpu {

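// Lightweight wrapper around a MIOpen fusion plan. Operators (conv, bias, relu)
// are appended to the plan through the create_* helpers, and the descriptors
// they depend on are kept alive in `storage` for the lifetime of the plan.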
struct fusion
{
    using op_t = miopenFusionOpDescriptor_t;
    shared<fusion_plan_descriptor> fp;

    // Used as a temporary hack to keep descriptor references alive
    std::vector<std::shared_ptr<void>> storage;

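    // Shares ownership of a descriptor and stores it so it stays valid while the plan is in use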
    template <class T>
    auto keep_alive(T x)
    {
        auto result = share(std::move(x));
        storage.push_back(result);
        return result;
    }

    fusion(const shape& input)
    // : fp(make_fusion_plan(input))
    {
        auto t = make_tensor(input);
        fp     = make_fusion_plan(t);
        keep_alive(std::move(t));
    }

    op_t operator[](std::size_t i) const
    {
        op_t result;
        auto status = miopenFusionPlanGetOp(fp.get(), i, &result);
        if(status != miopenStatusSuccess)
            MIGRAPH_THROW("Failed retrieving operator at " + std::to_string(i));
        return result;
    }

    auto get() const { return fp.get(); }

    op_t create_bias(const shape& bias)
    {
        op_t result;
        auto b      = shape{bias.type(), {1, bias.lens().at(1), 1, 1}};
        auto t      = keep_alive(make_tensor(b));
        auto status = miopenCreateOpBiasForward(fp.get(), &result, t.get());
        if(status != miopenStatusSuccess)
            MIGRAPH_THROW("Creating operator failed");
        return result;
    }

    op_t create_relu()
    {
        op_t result;
        auto status = miopenCreateOpActivationForward(fp.get(), &result, miopenActivationRELU);
        if(status != miopenStatusSuccess)
            MIGRAPH_THROW("Creating operator failed");
        return result;
    }

    op_t create_conv(const op::convolution& op, const shape& weights)
    {
        op_t result;
        auto cd     = keep_alive(make_conv(op));
        auto t      = keep_alive(make_tensor(weights));
        auto status = miopenCreateOpConvForward(fp.get(), &result, cd.get(), t.get());
        if(status != miopenStatusSuccess)
            MIGRAPH_THROW("Creating operator failed");
        return result;
    }
};

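// Matches a bias tensor broadcast across the channel dimension: a 4-d shape
// whose only non-zero stride is on dimension 1.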
MIGRAPH_PRED_MATCHER(bias_shape, instruction_ref ins)
{
    auto&& s = ins->get_shape();
    return s.broadcasted() and s.strides().size() == 4 and s.strides()[0] == 0 and
           s.strides()[1] != 0 and s.strides()[2] == 0 and s.strides()[3] == 0;
}

// TODO: Move to another header
template <class T, class... Ts>
std::array<T, sizeof...(Ts) + 1> make_array(T x, Ts... xs)
{
    return {std::move(x), std::move(static_cast<T>(xs))...};
}

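// Restrict fusion to gpu::convolution instructions with zero padding,
// unit stride, and unit dilation.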
MIGRAPH_PRED_MATCHER(fusable_conv, instruction_ref ins)
{
    if(ins->name() != "gpu::convolution")
        return false;
    auto op = any_cast<miopen_convolution>(ins->get_operator()).op;
    return op.padding == make_array<size_t>(0, 0) and op.stride == make_array<size_t>(1, 1) and
           op.dilation == make_array<size_t>(1, 1);
}

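// Fused elementwise add followed by relu, computed on the device.
// The last argument carries the output allocation and is returned as the result.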
struct hip_add_relu
{
    std::string name() const { return "hip::add_relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(3);
        return inputs.front();
    }
    argument compute(context&, const shape&, const std::vector<argument>& args) const
    {
        device::add_relu(args.at(2), args.at(0), args.at(1));
        return args.at(2);
    }
};

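// Matches relu(add(x, y)) and replaces it with a single hip::add_relu
// instruction that writes into the relu's output allocation.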
struct match_add_relu
{
    auto matcher() const
    {
        return match::name("gpu::relu")(match::arg(0)(match::name("gpu::add").bind("add")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto add_ins = r.instructions["add"];
        auto ins     = r.result;
        auto args    = add_ins->inputs();
        // Use the allocation from the relu operator
        args.back() = ins->inputs().back();
        p.replace_instruction(ins, hip_add_relu{}, args);
    }
};

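// Convolution followed by a bias add, executed through a single MIOpen fusion plan.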
struct miopen_conv_bias
{
    op::convolution op;
    fusion f;
    fusion::op_t conv;
    fusion::op_t bias;

    miopen_conv_bias(op::convolution c, shape input, shape weights, shape b) : op(c), f(input)
    {
        // Keep the returned fusion op handles so compute() can bind arguments to them
        conv = f.create_conv(op, weights);
        bias = f.create_bias(b);
    }

    std::string name() const { return "gpu::conv_bias"; }
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(5);
        // TODO: Check slices
        return op.compute_shape({inputs.at(0), inputs.at(1)});
    }
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const
    {
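        // Argument order (set up by match_conv_bias::apply):
        // args[0] = input, args[1] = weights, args[2] = workspace,
        // args[3] = bias, args[4] = output allocation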
        auto fargs  = make_fused_args();
        float alpha = 1, beta = 0;
        auto x = make_tensor(args[0].get_shape());
        auto y = make_tensor(output_shape);
        miopenSetOpArgsConvForward(fargs.get(), conv, &alpha, &beta, args[1].implicit());
        miopenSetOpArgsBiasForward(fargs.get(), bias, &alpha, &beta, args[3].implicit());
        auto status = miopenExecuteFusionPlan(ctx.handle.get(),
                                              f.get(),
                                              x.get(),
                                              args[0].implicit(),
                                              y.get(),
                                              args[4].implicit(),
                                              fargs.get());
        if(status != miopenStatusSuccess)
            MIGRAPH_THROW("Executing fusion plan failed");
        return args.at(4);
    }

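    // Compiles the fusion plan and returns the shape of the workspace
    // required by the selected convolution algorithm.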
    shape compile(context& ctx)
    {
        int algo_count = 1;
        miopenConvFwdAlgorithm_t algo;
        miopenFusionPlanConvolutionGetAlgo(f.get(), 1, &algo_count, &algo);
        std::size_t ws_size = 0;
        miopenFusionPlanGetWorkSpaceSize(ctx.handle.get(), f.get(), &ws_size, algo);
        auto status = miopenCompileFusionPlan(ctx.handle.get(), f.get());
        if(status != miopenStatusSuccess)
            MIGRAPH_THROW("Compiling fusion plan failed");
        return shape{shape::int8_type, {ws_size}};
    }
};

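// Matches gpu::add(convolution, broadcast bias) and replaces it with a
// gpu::conv_bias instruction whose fusion plan is compiled at match time.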
struct match_conv_bias
{
    context* ctx = nullptr;
    auto matcher() const
    {
        return match::name("gpu::add")(
            match::either_arg(0, 1)(bias_shape().bind("bias"), fusable_conv().bind("conv")));
    }

    void apply(program& p, match::matcher_result r) const
    {
        auto conv_ins    = r.instructions["conv"];
        auto bias_ins    = r.instructions["bias"];
        auto ins         = r.result;
        auto input_ins   = conv_ins->inputs().at(0);
        auto weights_ins = conv_ins->inputs().at(1);
        auto conv_op     = any_cast<miopen_convolution>(conv_ins->get_operator()).op;
        auto alloc_ins   = ins->inputs().back();
        auto old_ws_ins  = conv_ins->inputs().at(2);

        miopen_conv_bias cb{
            conv_op, input_ins->get_shape(), weights_ins->get_shape(), bias_ins->get_shape()};
        // TODO: Insert ws allocation
        auto ws = cb.compile(*ctx);
        (void)ws; // workspace shape is not yet used; see the TODO above

        p.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
    }
};

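// Entry point of the pass: run all fusion matchers over the program.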
void fuse_ops::apply(program& p) const
{
    match::find_matches(p, match_add_relu{}, match_conv_bias{ctx});
}

} // namespace gpu

} // namespace migraph