/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/permutation.hpp>
#include <migraphx/gpu/prefuse_ops.hpp>
#include <migraphx/gpu/gemm_softmax_gemm.hpp>
#include <migraphx/match/layernorm.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>

#ifdef MIGRAPHX_USE_COMPOSABLEKERNEL
#include <migraphx/gpu/ck.hpp>
#endif

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

namespace {

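// Common CRTP base for the pre-fused layernorm ops below. N is the number of
// layernorm inputs (1 for plain layernorm, 2 for add + layernorm); any
// additional inputs belong to a pointwise module fused after the layernorm.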
template <class Derived, std::size_t N>
struct layernorm_base
{
    float epsilon = 1e-12f;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.epsilon, "epsilon"));
    }
    shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
    {
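        // A fused pointwise module contributes its parameters as extra
        // inputs, except one, which corresponds to the layernorm result
        // computed in place; hence the -1 below.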
        std::size_t nargs = N;
        if(not mods.empty())
        {
            auto* pm = mods.front();
            nargs += pm->get_parameter_names().size() - 1;
        }
        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs);
        auto s = inputs.front();
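        // The output element type follows the fused module's output when a
        // module is present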
        auto t = s.type();
        if(not mods.empty())
            t = mods.front()->get_output_shapes().front().type();

        // Scalar output if all inputs are scalar
        if(inputs.front().elements() == 1 and
           all_of(inputs, [](const auto& ss) { return ss.scalar(); }))
            return inputs.front();
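        // Otherwise derive the output layout from the common permutation of
        // the layernorm inputs so transposed input layouts are preserved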
        auto l_s = shape::from_permutation(
            t, s.lens(), find_permutation(std::vector<shape>(inputs.begin(), inputs.begin() + N)));
        // just prelayernorm or preadd_layernorm
        if(nargs <= N)
            return l_s;
        // else, layernorm + pointwise fusion, preserve layout of fused op
        std::vector<shape> lp_s(inputs.begin() + N, inputs.end());
        lp_s.insert(lp_s.begin(), l_s);
        return shape::from_permutation(t, s.lens(), find_permutation(lp_s));
    }
};

struct layernorm : layernorm_base<layernorm, 1>
{
    std::string name() const { return "gpu::prelayernorm"; }
};
MIGRAPHX_REGISTER_OP(layernorm);

struct add_layernorm : layernorm_base<add_layernorm, 2>
{
    std::string name() const { return "gpu::preadd_layernorm"; }
};
MIGRAPHX_REGISTER_OP(add_layernorm);

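// Rewrite the generic layernorm pattern from match::layernorm() into
// gpu::prelayernorm, reading the epsilon literal when the pattern has one.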
struct find_layernorm
{
    auto matcher() const { return match::layernorm(); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        float eps  = 0;
        if(contains(r.instructions, "eps"))
            eps = r.instructions["eps"]->eval().at<float>();

        m.replace_instruction(ins, layernorm{eps}, x_ins);
    }
};

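// Fold a preceding add into gpu::prelayernorm to form gpu::preadd_layernorm;
// the add must have no other users since it disappears in the rewrite.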
struct find_add_layernorm
{
    auto matcher() const
    {
        return match::name("gpu::prelayernorm")(
            match::args(match::name("add")(match::used_once()).bind("add")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins     = r.result;
        auto add_ins = r.instructions["add"];
        auto op      = any_cast<layernorm>(ins->get_operator());

        m.replace_instruction(ins, add_layernorm{op.epsilon}, add_ins->inputs());
    }
};

#ifdef MIGRAPHX_USE_COMPOSABLEKERNEL

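// Pre-fused attention block (gemm -> scale -> softmax -> gemm) targeting
// ComposableKernel; the op inherits its semantics from gemm_softmax_gemm.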
struct pre_gemm_softmax_gemm : gemm_softmax_gemm
{
    std::string name() const { return "gpu::pre_gemm_softmax_gemm"; }
};
MIGRAPHX_REGISTER_OP(pre_gemm_softmax_gemm);

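// Only plain dot instructions whose element type CK supports are fusion
// candidates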
MIGRAPHX_PRED_MATCHER(is_ck_gemm, instruction_ref ins)
{
    if(ins->name() != "dot")
        return false;
    if(not pre_gemm_softmax_gemm::is_ck_supported_type(ins->get_shape().type()))
        return false;
    return true;
}

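// Matches the subgraph below and collapses it into pre_gemm_softmax_gemm,
// with A, B, B1 as the fused op's inputs:
//   gemm1  = dot(A, B)
//   scaled = mul(gemm1, scale)   // scale: constant, matched on either side
//   sm     = softmax(scaled)
//   out    = dot(sm, B1)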
struct find_gemm_softmax_gemm
{
    auto matcher() const
    {
        auto gemm1 =
            match::skip(match::name("contiguous"))(match::name("dot")(is_ck_gemm().bind("gemm1")));
        auto mul = match::name("mul")(
            match::nargs(2), match::either_arg(0, 1)(match::is_constant().bind("scale"), gemm1));
        auto softmax = match::name("softmax")(match::arg(0)(mul)).bind("softmax");

        return match::name("dot")(is_ck_gemm().bind("gemm2"))(match::arg(0)(softmax));
    }

    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm2_ins = r.instructions["gemm2"];
        auto gemm1_ins = r.instructions["gemm1"];
        auto scale_lit = r.instructions["scale"];

        float scale = 1.0f;
        scale_lit->eval().visit([&](const auto s) {
            // CK only supports a single-valued scale; keep the default of
            // 1.0f when the literal is not uniform
            if(std::all_of(
                   s.begin() + 1, s.end(), [&](auto v) { return float_equal(v, s.front()); }))
                scale = s.front();
        });

        auto inputs = gemm1_ins->inputs();            // A, B
        inputs.push_back(gemm2_ins->inputs().back()); // B1

        mpm.get_module().replace_instruction(
            ins, pre_gemm_softmax_gemm{gemm2_ins->get_operator(), scale}, inputs);
    }
};

#endif

} // namespace

void prefuse_ops::apply(module_pass_manager& mpm) const
{
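    // Order matters: rewrite layernorm first, then run DCE so the dead
    // pattern instructions no longer count as users of the add before
    // find_add_layernorm applies its used_once() check.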
    match::find_matches(mpm.get_module(), find_layernorm{});
    mpm.run_pass(dead_code_elimination{});
    match::find_matches(mpm.get_module(), find_add_layernorm{});
#ifdef MIGRAPHX_USE_COMPOSABLEKERNEL
    if(enabled(MIGRAPHX_ENABLE_CK{}))
        match::find_matches(mpm, find_gemm_softmax_gemm{});
#endif
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx