prefuse_ops.cpp
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/permutation.hpp>
#include <migraphx/gpu/prefuse_ops.hpp>
#include <migraphx/gpu/gemm_softmax_gemm.hpp>
#include <migraphx/match/layernorm.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#ifdef MIGRAPHX_USE_COMPOSABLEKERNEL
#include <migraphx/gpu/ck.hpp>
#endif
#include <migraphx/gpu/fuse_mlir.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

namespace {
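
// CRTP base shared by the prelayernorm/preadd_layernorm ops below. It carries the
// epsilon attribute and computes the output shape: scalar output when every input
// is scalar, a layout derived from the first N inputs for a plain layernorm, and a
// layout that also accounts for the extra inputs (and output type) of a fused
// pointwise module otherwise.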
template <class Derived, std::size_t N>
struct layernorm_base
{
    float epsilon = 1e-12f;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.epsilon, "epsilon"));
    }
    shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
    {
        std::size_t nargs = N;
        if(not mods.empty())
        {
            auto* pm = mods.front();
            nargs += pm->get_parameter_names().size() - 1;
        }
        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs);
        auto s = inputs.front();
        auto t = s.type();
        if(not mods.empty())
            t = mods.front()->get_output_shapes().front().type();

        // Scalar output if all inputs are scalar
        if(inputs.front().elements() == 1 and
           all_of(inputs, [](const auto& ss) { return ss.scalar(); }))
            return inputs.front();
        auto l_s = shape::from_permutation(
            t, s.lens(), find_permutation(std::vector<shape>(inputs.begin(), inputs.begin() + N)));
        // just prelayernorm or preadd_layernorm
        if(nargs <= N)
            return l_s;
        // else, layernorm + pointwise fusion, preserve layout of fused op
        std::vector<shape> lp_s(inputs.begin() + N, inputs.end());
        lp_s.insert(lp_s.begin(), l_s);
        return shape::from_permutation(t, s.lens(), find_permutation(lp_s));
    }
};

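// The prefused layernorm operators produced by this pass: gpu::prelayernorm takes a
// single input, while gpu::preadd_layernorm also folds in a leading add and takes two.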
struct layernorm : layernorm_base<layernorm, 1>
{
    std::string name() const { return "gpu::prelayernorm"; }
};
MIGRAPHX_REGISTER_OP(layernorm);

struct add_layernorm : layernorm_base<add_layernorm, 2>
{
    std::string name() const { return "gpu::preadd_layernorm"; }
};
MIGRAPHX_REGISTER_OP(add_layernorm);

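// Replaces a matched layernorm subgraph with a single gpu::prelayernorm instruction,
// taking epsilon from the matched "eps" literal when one is present.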
struct find_layernorm
{
    auto matcher() const { return match::layernorm(); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        float eps  = 0;
        if(contains(r.instructions, "eps"))
            eps = r.instructions["eps"]->eval().at<float>();

        m.replace_instruction(ins, layernorm{eps}, x_ins);
    }
};

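// Folds an add whose only use is a gpu::prelayernorm into gpu::preadd_layernorm,
// keeping the epsilon of the original prelayernorm.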
struct find_add_layernorm
{
    auto matcher() const
    {
        return match::name("gpu::prelayernorm")(
            match::args(match::name("add")(match::used_once()).bind("add")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins     = r.result;
        auto add_ins = r.instructions["add"];
        auto op      = any_cast<layernorm>(ins->get_operator());

        m.replace_instruction(ins, add_layernorm{op.epsilon}, add_ins->inputs());
    }
};
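
// Marker op inserted by this pass for a matched gemm->softmax->gemm (attention)
// subgraph; it is presumably lowered to an actual fused kernel by a later pass.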
struct pre_gemm_softmax_gemm : gemm_softmax_gemm
{
    std::string name() const { return "gpu::pre_gemm_softmax_gemm"; }
};
MIGRAPHX_REGISTER_OP(pre_gemm_softmax_gemm);

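// Predicate matchers gating the fusion below. A dot qualifies for the CK path only
// when composable kernels are built in, MIGRAPHX_ENABLE_CK is set, and its output
// type is CK-supported; it qualifies for the MLIR path only when MLIR attention is
// enabled and all of its input types are MLIR-supported.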
auto is_ck_gemm()
{
    return match::make_basic_pred_matcher([=](instruction_ref ins) {
#ifdef MIGRAPHX_USE_COMPOSABLEKERNEL
        if(not enabled(MIGRAPHX_ENABLE_CK{}))
            return false;
        if(ins->name() != "dot")
            return false;
        if(not pre_gemm_softmax_gemm::is_ck_supported_type(ins->get_shape().type()))
            return false;
        return true;
#else
        (void)ins;
        return false;
#endif
    });
}

auto is_mlir_gemm()
{
    return match::make_basic_pred_matcher([=](instruction_ref ins) {
        if(not mlir_attention_enabled())
            return false;
        if(ins->name() != "dot")
            return false;
        return std::all_of(ins->inputs().begin(), ins->inputs().end(), [&](auto i) {
            return pre_gemm_softmax_gemm::is_mlir_supported_type(i->get_shape().type());
        });
    });
}
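
// Matches the attention-style pattern dot(softmax(mul(scale, dot(A, B))), B1),
// optionally with a contiguous between the first dot and the mul, where scale is a
// constant, and replaces it with a single gpu::pre_gemm_softmax_gemm instruction.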

struct find_gemm_softmax_gemm
{
    auto matcher() const
    {
        auto gemm1 = match::skip(match::name("contiguous"))(
            match::name("dot")(match::any_of(is_ck_gemm(), is_mlir_gemm()).bind("gemm1")));
        auto mul = match::name("mul")(
            match::nargs(2), match::either_arg(0, 1)(match::is_constant().bind("scale"), gemm1));
        auto softmax = match::name("softmax")(match::arg(0)(mul)).bind("softmax");

        return match::name("dot")(match::any_of(is_ck_gemm(), is_mlir_gemm()).bind("gemm2"))(
            match::arg(0)(softmax));
    }

    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm2_ins = r.instructions["gemm2"];
        auto gemm1_ins = r.instructions["gemm1"];
        auto scale_lit = r.instructions["scale"];

        float scale = 1.0;
        scale_lit->eval().visit([&](const auto s) {
            // CK only supports single-valued scale
            if(std::all_of(
                   s.begin() + 1, s.end(), [&](auto v) { return float_equal(v, s.front()); }))
                scale = s.front();
            else
                return;
        });

        auto inputs = gemm1_ins->inputs();            // A, B
        inputs.push_back(gemm2_ins->inputs().back()); // B1

        mpm.get_module().replace_instruction(
            ins, pre_gemm_softmax_gemm{gemm2_ins->get_operator(), scale}, inputs);
    }
};

} // namespace

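// Pass entry point: rewrite layernorms, run dead code elimination (likely so that the
// used_once() check in find_add_layernorm sees the true number of uses), fuse
// add + layernorm, and finally fuse the gemm/softmax/gemm pattern.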
void prefuse_ops::apply(module_pass_manager& mpm) const
{
    match::find_matches(mpm.get_module(), find_layernorm{});
    mpm.run_pass(dead_code_elimination{});
    match::find_matches(mpm.get_module(), find_add_layernorm{});
    match::find_matches(mpm, find_gemm_softmax_gemm{});
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx