// layernorm.cpp
#include <migraphx/gpu/device/layernorm.hpp>
#include <migraphx/gpu/device/reduce.hpp>
#include <migraphx/gpu/device/pow.hpp>
#include <migraphx/gpu/device/fast_div.hpp>
// Standard headers used directly in this file (std::integral_constant,
// std::size_t, assert); they may already be pulled in by the device headers
// above, but are included explicitly for clarity.
#include <cassert>
#include <cstddef>
#include <type_traits>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

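// The block reduction presumably relies on DPP cross-lane operations; on
// wave32 targets (Navi / RDNA, __AMDGCN_WAVEFRONT_SIZE == 32) an extra
// workgroup barrier after each reduction is used as a workaround, gated by
// this macro.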
#ifndef MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC
#if __AMDGCN_WAVEFRONT_SIZE == 32
#define MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC 1
#else
#define MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC 0
#endif
#endif

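// vector_type extracts the element type T from vec<T, N>; the primary
// template is deliberately empty so that non-vector types fail to compile.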
template <class T>
struct vector_type
{
};

template <class T, index_int N>
struct vector_type<vec<T, N>>
{
    using type = T;
};

template <class T>
using vector_type_t = typename vector_type<T>::type;

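// vector_size reports the lane count of a type: 1 for scalars, N for
// vec<T, N>. It is used to rescale the per-row element count when the data
// is visited in vectorized form.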
template <class T>
struct vector_size : std::integral_constant<index_int, 1>
{
};

template <class T, index_int N>
struct vector_size<vec<T, N>> : std::integral_constant<index_int, N>
{
};
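
// vec_transform applies f to a scalar value, or elementwise to every lane of
// a vec<T, N>, returning a value of the same shape.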

template <class T, class F>
__device__ auto vec_transform(T x, F f)
{
    return f(x);
}

template <class T, index_int N, class F>
__device__ auto vec_transform(vec<T, N> x, F f)
{
    vec<T, N> y = x;
    // cppcheck-suppress useStlAlgorithm
    for(index_int k = 0; k < N; k++)
        y[k] = f(x[k]);
    return y;
}
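
// vec_reduce folds the lanes of a vec<T, N> into a single scalar with op,
// starting from init; the scalar overload just returns its argument.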

template <class T, class U, class Op>
__device__ auto vec_reduce(T x, U, Op)
{
    return x;
}

template <class T, index_int N, class U, class Op>
__device__ auto vec_reduce(vec<T, N> x, U init, Op op)
{
    T r = init;
    for(index_int k = 0; k < N; k++)
        r = op(r, x[k]);
    return r;
}
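
// auto_block_reduce performs the block-wide reduction and, when the
// per-thread values are vectors, additionally folds the lanes of the reduced
// vector so the result is always a scalar.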

template <index_int N, class Op, class T, class F>
__device__ auto auto_block_reduce(index idx, Op op, T init, index_int n, F f)
{
    auto r = block_reduce<N>(idx, op, init, n, f);
    return vec_reduce(r, 0, op);
}
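
// Device-side layernorm of a single row. Each block handles one row of
// relements values: i is the flat work-item id, and fast_div with the
// pre-encoded block_size_div recovers which row (out_idx) this block belongs
// to. The mean lambda is a block-wide reduction divided by the full row
// length, so it also works when each value_type is a vec covering several
// elements. No scale (gamma) or bias (beta) is applied here.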

template <index_int MaxBlockSize, class Input, class Output>
__device__ void layernorm(index_int i,
                          index idx,
                          std::size_t block_size_div,
                          index_int relements,
                          Input input,
                          Output output)
{
    using value_type       = decltype(input(idx.local));
    const auto relements_v = relements / vector_size<value_type>{};
    const auto out_idx     = fast_div(i, block_size_div);
    const auto base_idx    = out_idx * relements_v;
    const auto input_idx   = base_idx + idx.local;
    const bool in_range    = idx.local < relements_v;

    auto mean = [&](auto z) {
        auto m = auto_block_reduce<MaxBlockSize>(
                     idx, sum{}, value_type(0), relements_v, [=](auto) { return z; }) /
                 value_type(relements);
#if MIGRAPHX_WORKAROUND_NAVI_DPP_SYNC
        __builtin_amdgcn_s_barrier();
#endif
        return m;
    };

    // m = x - mean(x)
    value_type x = in_range ? input(input_idx) : 0;
    value_type m = x - mean(x);

    // mean(m ^ 2) + 1e-12
    value_type r = mean(m * m) + value_type(1e-12);

    // m * rsqrt(mean(m ^ 2) + 1e-12)
    if(in_range)
        output(input_idx, m * vec_transform(r, &rsqrt));
}

// The kernels below compute layernorm over the last dimension, without a
// scale or bias term:
//   m = x - mean(x)
//   y = m / sqrt(mean(m ^ 2) + 1e-12)

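// Vectorized host-side launcher: hip_vec_visit_all<N> visits the tensors as
// vec<T, N> packs, so each thread handles N consecutive elements and the
// per-row count drops to relements / N. This requires relements to be
// divisible by N.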
template <index_int N, class Input, class Output, class... Arguments>
void layernorm_vec_impl(hipStream_t stream,
                        index_int nelements,
                        index_int relements,
                        Input in,
                        Output out,
                        const argument& result,
                        const Arguments&... args)
{
    hip_vec_visit_all<N>(result, args...)([&](auto output, auto... inputs) {
        const auto relements_v           = relements / N;
        const std::size_t max_block_size = 256;
        const std::size_t block_size     = compute_block_size(relements_v, max_block_size);
        const std::size_t block_size_div = encode_divisor(block_size);
        assert(relements_v <= block_size);

        gs_launch(stream, nelements * block_size, block_size)([=](auto i, auto idx) __device__ {
            layernorm<max_block_size>(
                i,
                idx,
                block_size_div,
                relements,
                [&](auto input_idx) { return in(inputs.data()[input_idx]...); },
                [&](auto input_idx, auto x) {
                    out(x, output.data()[input_idx], inputs.data()[input_idx]...);
                });
        });
    });
}
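
// Scalar host-side launcher: same launch structure as the vectorized version
// but one element per thread, used for rows the vectorized path cannot handle.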

template <class Input, class Output, class... Arguments>
void layernorm_impl(hipStream_t stream,
                    index_int nelements,
                    index_int relements,
                    Input in,
                    Output out,
                    const argument& result,
                    const Arguments&... args)
{
    hip_visit_all(result, args...)([&](auto output, auto... inputs) {
        const std::size_t max_block_size = 256;
        const std::size_t block_size     = compute_block_size(relements, max_block_size);
        const std::size_t block_size_div = encode_divisor(block_size);
        assert(relements <= block_size);

        gs_launch(stream, nelements * block_size, block_size)([=](auto i, auto idx) __device__ {
            layernorm<max_block_size>(
                i,
                idx,
                block_size_div,
                relements,
                [&](auto input_idx) { return in(inputs.data()[input_idx]...); },
                [&](auto input_idx, auto x) {
                    out(x, output.data()[input_idx], inputs.data()[input_idx]...);
                });
        });
    });
}

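// layernorm_fusion returns a callable taking two functors: `input` combines
// the per-element values of the argument tensors (the fused prologue), and
// `output` writes the normalized value back (the fused epilogue). Rows whose
// length is a multiple of 4 use the vectorized kernel; other rows shorter
// than 256 use the scalar kernel; remaining shapes are unsupported and throw.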
template <class... Arguments>
auto layernorm_fusion(hipStream_t stream,
                      const argument& result,
                      const argument& arg1,
                      const Arguments&... args)
{
    return [=](auto input, auto output) {
        auto relements    = arg1.get_shape().lens().back();
        auto nelements    = result.get_shape().elements() / relements;
        // auto output_shape = result.get_shape();
        // auto reduce_output_lens(output_shape.lens());
        // reduce_output_lens.back() = 1;

        if((relements % 4) == 0)
            layernorm_vec_impl<4>(
                stream, nelements, relements, input, output, result, arg1, args...);
        else if(relements < 256)
            layernorm_impl(stream, nelements, relements, input, output, result, arg1, args...);
        else
            MIGRAPHX_THROW("No kernel for layernorm");
    };
}
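
// Fused three-way elementwise add followed by layernorm: the prologue sums
// the three inputs and the epilogue stores the normalized result.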

void triadd_layernorm(hipStream_t stream,
                      const argument& result,
                      const argument& arg1,
                      const argument& arg2,
                      const argument& arg3)
{
    layernorm_fusion(stream, result, arg1, arg2, arg3)(
        [](auto x, auto y, auto z) { return x + y + z; }, [](auto x, auto& y, auto...) { y = x; });
}
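
// Plain layernorm over the last dimension of arg1, with an identity prologue.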

void layernorm(hipStream_t stream, const argument& result, const argument& arg1)
{
    layernorm_fusion(stream, result, arg1)([](auto x) { return x; },
                                           [](auto x, auto& y, auto) { y = x; });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx