// prefuse_ops.cpp
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <migraphx/permutation.hpp>
#include <migraphx/gpu/prefuse_ops.hpp>
#include <migraphx/gpu/gemm_softmax_gemm.hpp>
#include <migraphx/match/layernorm.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/gpu/ck.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

namespace {

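// CRTP base for the pre-fused layernorm ops. N is the number of inputs the
// layernorm itself consumes (1 for plain layernorm, 2 for add + layernorm);
// a fused pointwise module can contribute further inputs on top of that.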
template <class Derived, std::size_t N>
struct layernorm_base
{
    float epsilon = 1e-12f;
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.epsilon, "epsilon"));
    }
    shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
    {
        std::size_t nargs = N;
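        // The fused pointwise module reads the layernorm result through one of
        // its parameters (hence the - 1 below); its remaining parameters count
        // as extra inputs to this op.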
        if(not mods.empty())
        {
            auto* pm = mods.front();
            nargs += pm->get_parameter_names().size() - 1;
        }
        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs);
        auto s = inputs.front();
        auto t = s.type();
        if(not mods.empty())
            t = mods.front()->get_output_shapes().front().type();

        // Scalar output if all inputs are scalar
        if(inputs.front().elements() == 1 and
           all_of(inputs, [](const auto& ss) { return ss.scalar(); }))
            return inputs.front();
        auto l_s = shape::from_permutation(
            t, s.lens(), find_permutation(std::vector<shape>(inputs.begin(), inputs.begin() + N)));
        // just prelayernorm or preadd_layernorm
        if(nargs <= N)
            return l_s;
        // else, layernorm + pointwise fusion, preserve layout of fused op
        std::vector<shape> lp_s(inputs.begin() + N, inputs.end());
        lp_s.insert(lp_s.begin(), l_s);
        return shape::from_permutation(t, s.lens(), find_permutation(lp_s));
    }
};

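// Plain layernorm over a single input.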
struct layernorm : layernorm_base<layernorm, 1>
{
    std::string name() const { return "gpu::prelayernorm"; }
};
MIGRAPHX_REGISTER_OP(layernorm);

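// Fused add + layernorm over two inputs.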
struct add_layernorm : layernorm_base<add_layernorm, 2>
{
    std::string name() const { return "gpu::preadd_layernorm"; }
};
MIGRAPHX_REGISTER_OP(add_layernorm);

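// Rewrites the generic layernorm pattern into gpu::prelayernorm, extracting
// the epsilon literal when the match binds one.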
struct find_layernorm
{
    auto matcher() const { return match::layernorm(); }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins   = r.result;
        auto x_ins = r.instructions["x"];
        float eps  = 0;
        if(contains(r.instructions, "eps"))
            eps = r.instructions["eps"]->eval().at<float>();

        m.replace_instruction(ins, layernorm{eps}, x_ins);
    }
};

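// Folds a single-use add feeding gpu::prelayernorm into gpu::preadd_layernorm:
//   prelayernorm(add(x, y))  ==>  preadd_layernorm(x, y)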
struct find_add_layernorm
{
    auto matcher() const
    {
        return match::name("gpu::prelayernorm")(
            match::args(match::name("add")(match::used_once()).bind("add")));
    }

    void apply(module& m, const match::matcher_result& r) const
    {
        auto ins     = r.result;
        auto add_ins = r.instructions["add"];
        auto op      = any_cast<layernorm>(ins->get_operator());

        m.replace_instruction(ins, add_layernorm{op.epsilon}, add_ins->inputs());
    }
};

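// Placeholder op for the gemm -> softmax -> gemm (attention-style) fusion;
// only created when the CK backend is enabled (see apply() below).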
struct pre_gemm_softmax_gemm : gemm_softmax_gemm
{
    std::string name() const { return "gpu::pre_gemm_softmax_gemm"; }
};
MIGRAPHX_REGISTER_OP(pre_gemm_softmax_gemm);

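// Predicate matcher: a dot instruction whose output type CK gemm kernels
// support.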
MIGRAPHX_PRED_MATCHER(is_ck_gemm, instruction_ref ins)
{
    if(ins->name() != "dot")
        return false;
    if(not pre_gemm_softmax_gemm::is_ck_supported_type(ins->get_shape().type()))
        return false;
    return true;
}

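// Matches the scaled-attention pattern
//   gemm1  = dot(A, B)
//   scaled = mul(scale, gemm1)   // constant scale, either argument order
//   sm     = softmax(scaled)
//   result = dot(sm, B1)
// and collapses it into gpu::pre_gemm_softmax_gemm(A, B, B1) with the
// extracted scale.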
struct find_gemm_softmax_gemm
{
    auto matcher() const
    {
        auto gemm1 =
            match::skip(match::name("contiguous"))(match::name("dot")(is_ck_gemm().bind("gemm1")));
        auto mul = match::name("mul")(
            match::nargs(2), match::either_arg(0, 1)(match::is_constant().bind("scale"), gemm1));
        auto softmax = match::name("softmax")(match::arg(0)(mul)).bind("softmax");

        return match::name("dot")(is_ck_gemm().bind("gemm2"))(match::arg(0)(softmax));
    }

    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
    {
        auto ins       = r.result;
        auto gemm2_ins = r.instructions["gemm2"];
        auto gemm1_ins = r.instructions["gemm1"];
        auto scale_lit = r.instructions["scale"];

        float scale = 1.0f;
        scale_lit->eval().visit([&](const auto s) {
            // CK only supports a single-valued scale; leave 1.0 if it varies
            if(std::all_of(
                   s.begin() + 1, s.end(), [&](auto v) { return float_equal(v, s.front()); }))
                scale = s.front();
        });

        auto inputs = gemm1_ins->inputs();            // A, B
        inputs.push_back(gemm2_ins->inputs().back()); // B1

        mpm.get_module().replace_instruction(
            ins, pre_gemm_softmax_gemm{gemm2_ins->get_operator(), scale}, inputs);
    }
};

} // namespace

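// Pass entry point: rewrite layernorms, run dead-code elimination (presumably
// so the dead remainder of the matched pattern is removed and the add can
// satisfy used_once()), then fuse add + layernorm, and finally match the CK
// attention pattern when MIGRAPHX_ENABLE_CK is set.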
void prefuse_ops::apply(module_pass_manager& mpm) const
{
    match::find_matches(mpm.get_module(), find_layernorm{});
    mpm.run_pass(dead_code_elimination{});
    match::find_matches(mpm.get_module(), find_add_layernorm{});
    if(enabled(MIGRAPHX_ENABLE_CK{}))
        match::find_matches(mpm, find_gemm_softmax_gemm{});
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx