#include <migraphx/gpu/device/layernorm.hpp>
#include <migraphx/gpu/device/reduce.hpp>
#include <migraphx/gpu/device/pow.hpp>
#include <migraphx/gpu/device/fast_div.hpp>
#include <type_traits> // std::integral_constant

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

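// Default the Navi DPP synchronization workaround to on for
// wavefront-size-32 targets: it inserts an extra s_barrier after each
// block reduction (see the mean() lambda in layernorm below).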
#ifndef MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC
#if __AMDGCN_WAVEFRONT_SIZE == 32
#define MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC 1
#else
#define MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC 0
#endif
#endif

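// Trait that maps a vector type vec<T, N> to its scalar element type T.
// The primary template has no ::type member, so vector_type_t<T> is only
// well-formed for vec types (e.g. vector_type_t<vec<float, 4>> is float).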
template <class T>
struct vector_type
{
};

template <class T, index_int N>
struct vector_type<vec<T, N>>
{
    using type = T;
};

template <class T>
using vector_type_t = typename vector_type<T>::type;

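// Trait giving the number of lanes in a type: N for vec<T, N> and 1 for
// plain scalars.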
template <class T>
struct vector_size : std::integral_constant<index_int, 1>
{
};

template <class T, index_int N>
struct vector_size<vec<T, N>> : std::integral_constant<index_int, N>
{
};

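// Apply f element-wise: the scalar overload forwards straight to f, the
// vector overload applies f to each lane (e.g. vec_transform(r, &rsqrt)
// below computes a per-lane rsqrt).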
template <class T, class F>
__device__ auto vec_transform(T x, F f)
{
    return f(x);
}

template <class T, index_int N, class F>
__device__ auto vec_transform(vec<T, N> x, F f)
{
    vec<T, N> y = x;
    // cppcheck-suppress useStlAlgorithm
    for(index_int k = 0; k < N; k++)
        y[k] = f(x[k]);
    return y;
}

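// Fold the lanes of a vector into one scalar with op, starting from init;
// the scalar overload returns x unchanged and ignores init and op.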
template <class T, class U, class Op>
__device__ auto vec_reduce(T x, U, Op)
{
    return x;
}

template <class T, index_int N, class U, class Op>
__device__ auto vec_reduce(vec<T, N> x, U init, Op op)
{
    T r = init;
    for(index_int k = 0; k < N; k++)
        r = op(r, x[k]);
    return r;
}

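// Block-wide reduction that also folds vector lanes, so the caller gets a
// single value whether T is a plain type or a vec<T, N>.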
template <index_int N, class Op, class T, class F>
__device__ auto auto_block_reduce(index idx, Op op, T init, index_int n, F f)
{
    auto r = block_reduce<N>(idx, op, init, n, f);
    return vec_reduce(r, 0, op);
}

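// Normalize one row of relements values per workgroup:
//   m = x - mean(x)
//   y = m * rsqrt(mean(m ^ 2) + 1e-12)
// i is the global work-item id, and block_size_div is the encoded divisor
// that fast_div uses to recover the row index from it.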
template <index_int MaxBlockSize, class Input, class Output>
__device__ void layernorm(
    index_int i, index idx, int block_size_div, index_int relements, Input input, Output output)
{
    using value_type       = decltype(input(idx.local));
    const auto relements_v = relements / vector_size<value_type>{};
    const auto out_idx     = fast_div(i, block_size_div);
    const auto base_idx    = out_idx * relements_v;
    const auto input_idx   = base_idx + idx.local;
    const bool in_range    = idx.local < relements_v;

    auto mean = [&](auto z) {
        auto m = auto_block_reduce<MaxBlockSize>(
                     idx, sum{}, value_type(0), relements_v, [=](auto) { return z; }) /
                 value_type(relements);
#if MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC
        __builtin_amdgcn_s_barrier();
#endif
        return m;
    };

    // m = x - mean(x)
    value_type x = in_range ? input(input_idx) : 0;
    value_type m = x - mean(x);

    // mean(m ^ 2) + 1e-12
    value_type r = mean(m * m) + value_type(1e-12);

    // m * rsqrt(mean(m ^ 2) + 1e-12)
    if(in_range)
        output(input_idx, m * vec_transform(r, &rsqrt));
}

// m = x - mean(x)
// m / sqrt(mean(m ^ 2) + 1e-12)

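// Vectorized implementation: tensors are visited as vec<T, N> (N = 4 at
// the call site in layernorm_fusion), so each thread handles N packed
// elements of the row.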
template <index_int N, class Input, class Output, class... Arguments>
void layernorm_vec_impl(hipStream_t stream,
                        index_int nelements,
                        index_int relements,
                        Input in,
                        Output out,
                        const argument& result,
                        const Arguments&... args)
{
    hip_vec_visit_all<N>(result, args...)([&](auto output, auto... inputs) {
        const auto relements_v   = relements / N;
        const int max_block_size = 256;
        const int block_size     = compute_block_size(relements_v, max_block_size);
        const int block_size_div = encode_divisor(block_size);
        assert(relements_v <= block_size);

        gs_launch(stream, nelements * block_size, block_size)([=](auto i, auto idx) __device__ {
            layernorm<max_block_size>(
                i,
                idx,
                block_size_div,
                relements,
                [&](auto input_idx) { return in(inputs.data()[input_idx]...); },
                [&](auto input_idx, auto x) {
                    out(x, output.data()[input_idx], inputs.data()[input_idx]...);
                });
        });
    });
}

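// Scalar fallback, used when relements is not a multiple of the vector
// width; each thread handles one element of the row.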
template <class Input, class Output, class... Arguments>
void layernorm_impl(hipStream_t stream,
                    index_int nelements,
                    index_int relements,
                    Input in,
                    Output out,
                    const argument& result,
                    const Arguments&... args)
{
    hip_visit_all(result, args...)([&](auto output, auto... inputs) {
        const int max_block_size = 256;
        const int block_size     = compute_block_size(relements, max_block_size);
        const int block_size_div = encode_divisor(block_size);
        assert(relements <= block_size);

        gs_launch(stream, nelements * block_size, block_size)([=](auto i, auto idx) __device__ {
            layernorm<max_block_size>(
                i,
                idx,
                block_size_div,
                relements,
                [&](auto input_idx) { return in(inputs.data()[input_idx]...); },
                [&](auto input_idx, auto x) {
                    out(x, output.data()[input_idx], inputs.data()[input_idx]...);
                });
        });
    });
}

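// Build a fused layernorm launcher: `input` combines the input tensors
// element-wise before normalization and `output` writes the normalized
// value back (optionally fusing a trailing pointwise op). The reduction
// runs over the last dimension of arg1.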
template <class... Arguments>
auto layernorm_fusion(hipStream_t stream,
                      const argument& result,
                      const argument& arg1,
                      const Arguments&... args)
{
    return [=](auto input, auto output) {
        auto relements    = arg1.get_shape().lens().back();
        auto nelements    = result.get_shape().elements() / relements;
        auto output_shape = result.get_shape();
        auto reduce_output_lens(output_shape.lens());
        reduce_output_lens.back() = 1;

        if((relements % 4) == 0)
            layernorm_vec_impl<4>(
                stream, nelements, relements, input, output, result, arg1, args...);
        else if(relements < 256)
            layernorm_impl(stream, nelements, relements, input, output, result, arg1, args...);
        else
            MIGRAPHX_THROW("No kernel for layernorm");
    };
}

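// Fused triadd + layernorm: normalizes x + y + z in a single kernel
// instead of materializing the sum first.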
void triadd_layernorm(hipStream_t stream,
                      const argument& result,
                      const argument& arg1,
                      const argument& arg2,
                      const argument& arg3)
{
    layernorm_fusion(stream, result, arg1, arg2, arg3)(
        [](auto x, auto y, auto z) { return x + y + z; }, [](auto x, auto& y, auto...) { y = x; });
}

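// Plain (unfused) layernorm over the last dimension of arg1.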
void layernorm(hipStream_t stream, const argument& result, const argument& arg1)
{
    layernorm_fusion(stream, result, arg1)([](auto x) { return x; },
                                           [](auto x, auto& y, auto) { y = x; });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx