fuse_mlir.cpp 12.6 KB
Newer Older
Paul Fultz II's avatar
Paul Fultz II committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/gpu/fuse_mlir.hpp>
#include <migraphx/gpu/mlir.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/register_op.hpp>
30
#include <migraphx/env.hpp>
Paul Fultz II's avatar
Paul Fultz II committed
31
32
33
34
35
36
37
38

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

struct module;

namespace gpu {

39
40
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_MLIR);

41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
// Reports whether the MLIR kernel generator is available and turned on.
// MLIR support is compiled in behind MIGRAPHX_MLIR, but remains opt-in at
// runtime via the MIGRAPHX_ENABLE_MLIR environment variable.
bool mlir_enabled()
{
#ifdef MIGRAPHX_MLIR
    if(enabled(MIGRAPHX_ENABLE_MLIR{}))
        return true;
    // Built with MLIR but the env var is unset: warn so users know the
    // faster kernel path exists.
    std::cerr << "WARNING: MIGraphX built with MLIR but it is not enabled. Please set the env "
                 "var MIGRAPHX_ENABLE_MLIR to use MLIR kernel generator."
              << std::endl;
    return false;
#else
    return false;
#endif
}

Paul Fultz II's avatar
Paul Fultz II committed
62
#ifdef MIGRAPHX_MLIR
63
64

struct mlir_op
Paul Fultz II's avatar
Paul Fultz II committed
65
{
66
    std::string name() const { return "gpu::mlir_op"; }
Paul Fultz II's avatar
Paul Fultz II committed
67
68
69
70
71
72
73
74
75
76
    operation op = make_op("convolution");

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.op, "op"));
    }

    shape compute_shape(std::vector<shape> inputs, const std::vector<module_ref>& mods) const
    {
77
        check_shapes{inputs, *this}.packed_or_broadcasted();
Paul Fultz II's avatar
Paul Fultz II committed
78
79
80
81
        if(mods.size() != 1)
            MIGRAPHX_THROW("should have one submodule.");
        if(inputs.size() < 2)
            MIGRAPHX_THROW("should have at least two inputs.");
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116

        module_ref mod = mods[0];
        auto type      = mod->get_output_shapes().front().type();
        std::unordered_map<instruction_ref, shape> ins_shapes;
        size_t param_cnt               = 0;
        std::vector<std::string> names = mod->get_parameter_names();
        std::sort(names.begin(), names.end());
        for(std::string param_name : names)
        {
            ins_shapes[mod->get_parameter(param_name)] = inputs[param_cnt++];
        }
        for(auto ins : iterator_for(*mod))
        {
            if(ins->name() == "@param")
            {
                continue;
            }
            if(ins->name() == "@literal")
            {
                ins_shapes[ins] = ins->get_shape();
                continue;
            }
            if(ins->name() == "@return")
            {
                return ins_shapes[ins->inputs().at(0)].with_type(type);
            }
            std::vector<shape> input_shapes;
            input_shapes.resize(ins->inputs().size());
            std::transform(ins->inputs().begin(),
                           ins->inputs().end(),
                           input_shapes.begin(),
                           [&](auto in) { return ins_shapes[in]; });
            ins_shapes[ins] = ins->get_operator().compute_shape(input_shapes);
        }
        MIGRAPHX_THROW("No return found in the submodule");
Paul Fultz II's avatar
Paul Fultz II committed
117
118
    }
};
119
MIGRAPHX_REGISTER_OP(mlir_op);
Paul Fultz II's avatar
Paul Fultz II committed
120
121

namespace {
122
123
124

// Matches convolutions that the MLIR backend can compile: plain or quantized,
// ungrouped, and with 4-D output.
MIGRAPHX_PRED_MATCHER(is_mlir_conv, instruction_ref ins)
{
    const auto& op_name = ins->name();
    if(op_name != "convolution" and op_name != "quant_convolution")
        return false;
    // Grouped convolutions are not handled.
    const value op_value = ins->get_operator().to_value();
    if(op_value.at("group").to<int>() != 1)
        return false;
    // Avoid MLIR assertion: Index < Length && "Invalid index!"
    return ins->get_shape().lens().size() == 4;
}

137
struct find_mlir_op
Paul Fultz II's avatar
Paul Fultz II committed
138
139
140
{
    auto matcher() const
    {
141
        auto dot_or_conv = match::skip(match::name("contiguous"))(
142
143
            match::any_of(match::name("dot"), match::name("quant_dot"), is_mlir_conv())
                .bind("gemm_based_op"));
144
        return match::name("pointwise")(match::any_of[match::inputs()](dot_or_conv.bind("x")));
Paul Fultz II's avatar
Paul Fultz II committed
145
146
    }

147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
    std::unordered_map<instruction_ref, instruction_ref>
    create_param_map_with_literals(module_ref mm, const module* pm, const shape& shape) const
    {
        std::unordered_map<instruction_ref, instruction_ref> ins_map;
        for(auto ins : iterator_for(*pm))
        {
            if(ins->name() != "@literal")
            {
                continue;
            }
            literal r               = ins->get_literal();
            instruction_ref literal = mm->add_literal(r);
            instruction_ref mbcast  = mm->add_instruction(
                make_op("multibroadcast", {{"out_lens", shape.lens()}}), literal);
            ins_map[ins] = mbcast;
        }
        return ins_map;
    }

    std::tuple<instruction_ref, std::vector<instruction_ref>>
    fuse_input_ops_and_gemm_based_op(module_ref mm, instruction_ref gemm_based_op) const
    {
        std::vector<instruction_ref> top_inputs;
        std::vector<instruction_ref> imm_inputs;
        size_t input_cnt = 0;
        for(instruction_ref input : gemm_based_op->inputs())
        {
            std::vector<operation> op_stream;
            while(contains({"slice", "transpose", "contiguous", "reshape"}, input->name()))
            {
                op_stream.push_back(input->get_operator());
                input = input->inputs().at(0);
            }
            top_inputs.push_back(input);
            instruction_ref prev_input =
                mm->add_parameter("y" + std::to_string(input_cnt++), input->get_shape());
            for(const auto& op : reverse(op_stream))
            {
                prev_input = mm->add_instruction(op, {prev_input});
            }
            imm_inputs.push_back(prev_input);
        }
        instruction_ref new_gemm_based_op =
            mm->add_instruction(gemm_based_op->get_operator(), imm_inputs);
        return {new_gemm_based_op, top_inputs};
    }

194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
    // Whitelist supported fusion options, including imposing type constraints
    // for cases where MLIR only supports an operation (usually a pointwise function)
    // on particular types.
    bool is_pointwise_op_supported_by_mlir(const instruction& i) const
    {
        using type_t                                      = shape::type_t;
        const auto& name                                  = i.name();
        const auto result_type                            = i.get_shape().type();
        const std::initializer_list<type_t> allowed_types = {type_t::float_type,
                                                             type_t::half_type,
                                                             type_t::int8_type,
                                                             type_t::int32_type,
                                                             type_t::bool_type};
        // Preliminary type check.
        if(not contains(allowed_types, result_type))
        {
            return false;
        }
        const std::initializer_list<std::string> any_type_ops = {"@literal", "@param", "@return"};
        const std::initializer_list<std::string> no_bool_ops  = {"convolution",
                                                                "quant_convolution",
                                                                "dot",
                                                                "quant_dot",
                                                                "add",
                                                                "clip",
kahmed10's avatar
kahmed10 committed
219
                                                                "relu",
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
                                                                "sub",
                                                                "mul",
                                                                "div",
                                                                "pow",
                                                                "where",
                                                                "quantizelinear",
                                                                "dequantizelinear",
                                                                "abs",
                                                                "neg"};
        const std::initializer_list<std::string> fp_only_ops  = {"ceil",
                                                                "erf",
                                                                "exp",
                                                                "floor",
                                                                "log",
                                                                "recip",
                                                                "rsqrt",
                                                                "sigmoid"
                                                                "softmax",
                                                                "tanh"};
        bool is_float = contains({type_t::float_type, type_t::half_type}, result_type);
        if(contains(any_type_ops, name))
            return true;
242
        if(result_type != type_t::bool_type and contains(no_bool_ops, name))
243
            return true;
244
        if(is_float and contains(fp_only_ops, name))
245
246
247
            return true;
        // Only conversions between floating types are known to be unambigiously
        // supported.
248
        if(is_float and name == "convert")
249
250
251
252
253
254
255
256
        {
            return std::all_of(i.inputs().begin(), i.inputs().end(), [](const auto& arg) {
                return contains({type_t::float_type, type_t::half_type}, arg->get_shape().type());
            });
        }
        return false;
    }

Paul Fultz II's avatar
Paul Fultz II committed
257
258
    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
259
260
261
262
263
        auto ins           = r.result;
        auto gemm_based_op = r.instructions["gemm_based_op"];
        auto x_ins         = r.instructions["x"]; // input after contiguous
        auto* pm           = ins->module_inputs().front();
        auto names         = pm->get_parameter_names();
264
265
266
        // Whitelist pointwise operators.
        if(std::any_of(pm->begin(), pm->end(), [&](const auto& i) {
               return not is_pointwise_op_supported_by_mlir(i);
Paul Fultz II's avatar
Paul Fultz II committed
267
268
           }))
            return;
269

Paul Fultz II's avatar
Paul Fultz II committed
270
271
272
        std::sort(names.begin(), names.end());
        module_ref mm = mpm.create_module("mlir_" + pm->name());
        mm->set_bypass();
273
274
275
        std::unordered_map<instruction_ref, instruction_ref> param_map =
            create_param_map_with_literals(mm, pm, gemm_based_op->get_shape());
        auto [anchor_op, top_inputs] = fuse_input_ops_and_gemm_based_op(mm, gemm_based_op);
Paul Fultz II's avatar
Paul Fultz II committed
276
277
278
279
        std::transform(names.begin(),
                       names.end(),
                       ins->inputs().begin(),
                       std::inserter(param_map, param_map.end()),
280
                       [&, &anchor_op = anchor_op](auto name, auto input) {
Paul Fultz II's avatar
Paul Fultz II committed
281
                           if(input == x_ins)
282
                               return std::make_pair(pm->get_parameter(name), anchor_op);
Paul Fultz II's avatar
Paul Fultz II committed
283
284
285
286
287
288
289
290
291
                           return std::make_pair(pm->get_parameter(name),
                                                 mm->add_parameter(name, input->get_shape()));
                       });
        mm->add_return(mm->insert_instructions(mm->end(), pm, param_map));

        std::vector<instruction_ref> inputs;
        std::copy_if(ins->inputs().begin(),
                     ins->inputs().end(),
                     std::back_inserter(inputs),
292
                     [&](auto input) { return input != gemm_based_op; });
293
        inputs.insert(inputs.end(), top_inputs.begin(), top_inputs.end());
Paul Fultz II's avatar
Paul Fultz II committed
294
        mpm.get_module().replace_instruction(
295
            ins, mlir_op{gemm_based_op->get_operator()}, inputs, {mm});
Paul Fultz II's avatar
Paul Fultz II committed
296
297
    }
};
298

Paul Fultz II's avatar
Paul Fultz II committed
299
300
301
302
303
304
305
} // namespace

#endif

// Entry point for the pass: when built with MLIR support, rewrites eligible
// gemm/convolution + pointwise chains into gpu::mlir_op instructions;
// otherwise the pass is a no-op.
void fuse_mlir::apply(module_pass_manager& mpm) const
{
#ifdef MIGRAPHX_MLIR
    match::find_matches(mpm, find_mlir_op{});
#else
    // Silence the unused-parameter warning in non-MLIR builds.
    (void)mpm;
#endif
}

} // namespace gpu

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx