/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/gpu/prefuse_ops.hpp>
#include <migraphx/gpu/gemm_softmax_gemm.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/match/layernorm.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/ck.hpp>
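
// prefuse_ops rewrites recognizable subgraphs (layernorm, add + layernorm,
// and, when CK is enabled, gemm -> scale -> softmax -> gemm) into single
// "pre-fused" ops that later GPU compilation stages can lower to fused kernels.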

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

namespace {

template <class Derived, std::size_t N>
struct layernorm_base
{
    float epsilon = 1e-12f;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.epsilon, "epsilon"));
    }

    shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
    {
        std::size_t nargs = N;
        if(not mods.empty())
        {
            // A fused pointwise module contributes its parameters as extra
            // inputs, except for the one that is fed by the layernorm output
            auto* pm = mods.front();
            nargs += pm->get_parameter_names().size() - 1;
        }
        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs);
        auto s = inputs.front();
        // With a fused module, the output type comes from that module
        auto t = s.type();
        if(not mods.empty())
            t = mods.front()->get_output_shapes().front().type();

        // Scalar output if all inputs are scalar
        if(inputs.front().elements() == 1 and
           all_of(inputs, [](const auto& ss) { return ss.scalar(); }))
            return inputs.front();
        auto l_s = shape::from_permutation(
            t, s.lens(), find_permutation(std::vector<shape>(inputs.begin(), inputs.begin() + N)));
        // just prelayernorm or preadd_layernorm
        if(nargs <= N)
            return l_s;
        // else, layernorm + pointwise fusion, preserve layout of fused op
        std::vector<shape> lp_s(inputs.begin() + N, inputs.end());
        lp_s.insert(lp_s.begin(), l_s);
        return shape::from_permutation(t, s.lens(), find_permutation(lp_s));
    }
};

struct layernorm : layernorm_base<layernorm, 1>
{
    std::string name() const { return "gpu::prelayernorm"; }
};
MIGRAPHX_REGISTER_OP(layernorm);

struct add_layernorm : layernorm_base<add_layernorm, 2>
{
    std::string name() const { return "gpu::preadd_layernorm"; }
};
MIGRAPHX_REGISTER_OP(add_layernorm);
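
// Illustrative effect of the two finders below (a sketch, not actual
// MIGraphX IR syntax):
//   y = layernorm(x)                 ==>  y = gpu::prelayernorm(x)
//   y = gpu::prelayernorm(add(a, b)) ==>  y = gpu::preadd_layernorm(a, b)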

struct find_layernorm
{
    auto matcher() const { return match::layernorm(); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        // Use the matched epsilon literal when present, otherwise default to 0
        float eps  = 0;
        if(contains(r.instructions, "eps"))
            eps = r.instructions["eps"]->eval().at<float>();

        m.replace_instruction(ins, layernorm{eps}, x_ins);
    }
};

struct find_add_layernorm
{
    auto matcher() const
    {
        return match::name("gpu::prelayernorm")(
            match::args(match::name("add")(match::used_once()).bind("add")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins     = r.result;
        auto add_ins = r.instructions["add"];
        auto op      = any_cast<layernorm>(ins->get_operator());

        m.replace_instruction(ins, add_layernorm{op.epsilon}, add_ins->inputs());
    }
};

struct pre_gemm_softmax_gemm : gemm_softmax_gemm
{
    std::string name() const { return "gpu::pre_gemm_softmax_gemm"; }
};
MIGRAPHX_REGISTER_OP(pre_gemm_softmax_gemm);
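
// The finder below targets the attention-style chain
//   dot(A, B) -> mul(const scale) -> softmax -> dot(..., B1)
// and collapses it into gpu::pre_gemm_softmax_gemm(A, B, B1), provided
// both dots use element types that CK supports.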

MIGRAPHX_PRED_MATCHER(is_ck_gemm, instruction_ref ins)
{
    if(ins->name() != "dot")
        return false;
    if(not pre_gemm_softmax_gemm::is_ck_supported_type(ins->get_shape().type()))
        return false;
    return true;
}

struct find_gemm_softmax_gemm
{
    auto matcher() const
    {
        auto gemm1 =
            match::skip(match::name("contiguous"))(match::name("dot")(is_ck_gemm().bind("gemm1")));
        auto mul = match::name("mul")(
            match::nargs(2), match::either_arg(0, 1)(match::is_constant().bind("scale"), gemm1));
        auto softmax = match::name("softmax")(match::arg(0)(mul)).bind("softmax");

        return match::name("dot")(is_ck_gemm().bind("gemm2"))(match::arg(0)(softmax));
    }

    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm2_ins = r.instructions["gemm2"];
        auto gemm1_ins = r.instructions["gemm1"];
        auto scale_lit = r.instructions["scale"];

        // CK only supports a single scale value, so fold the literal in only
        // when every element is identical; otherwise keep the default of 1
        float scale = 1.0f;
        scale_lit->eval().visit([&](const auto s) {
            if(std::all_of(
                   s.begin() + 1, s.end(), [&](auto v) { return float_equal(v, s.front()); }))
                scale = s.front();
        });

        auto inputs = gemm1_ins->inputs();            // A, B
        inputs.push_back(gemm2_ins->inputs().back()); // B1

        mpm.get_module().replace_instruction(
            ins, pre_gemm_softmax_gemm{gemm2_ins->get_operator(), scale}, inputs);
    }
};

} // namespace

void prefuse_ops::apply(module_pass_manager& mpm) const
{
    match::find_matches(mpm.get_module(), find_layernorm{});
    mpm.run_pass(dead_code_elimination{});
    match::find_matches(mpm.get_module(), find_add_layernorm{});
    if(enabled(MIGRAPHX_ENABLE_CK{}))
        match::find_matches(mpm, find_gemm_softmax_gemm{});
}
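
// Minimal usage sketch (hypothetical driver code; `build_model` is an
// illustrative helper, and `m` is assumed to contain a layernorm subgraph):
//
//   migraphx::module m = build_model();
//   migraphx::run_passes(m, {migraphx::gpu::prefuse_ops{},
//                            migraphx::dead_code_elimination{}});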

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx