#ifndef CK_BLOCKWISE_3D_TENSOR_OP_HPP
#define CK_BLOCKWISE_3D_TENSOR_OP_HPP

#include "common.hpp"
#include "ConstantTensorDescriptor.hpp"

namespace ck {

template <index_t BlockSize,
          class Float,
          class SrcDesc,
          class DstDesc,
          class CopyLengths,
          index_t DataPerRead>
struct Blockwise3dTensorCopy1
{
    using vector_t = typename vector_type<Float, DataPerRead>::MemoryType;

    __device__ constexpr Blockwise3dTensorCopy1()
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        static_assert(DataPerRead == 1 ||
                          (SrcDesc{}.GetStride(I2) == 1 && DstDesc{}.GetStride(I2) == 1),
                      "wrong! only support stride2 == 1 if DataPerRead > 1!\n");

        static_assert(DataPerRead == 1 || DataPerRead == 2 || DataPerRead == 4,
                      "wrong! only support DataPerRead == 1, 2 or 4!\n");

        static_assert(SrcDesc{}.GetStride(I1) % DataPerRead == 0 &&
                          DstDesc{}.GetStride(I1) % DataPerRead == 0,
                      "src and dst stride1 should be multiple of DataPerRead to keep alignment");

        // we allow out-of-bound read from src in D2 dimension,
        //   but we need to make sure dst stride1 is big enough,
        //   so that the out-of-bound write won't contaminate the next line in dst
        constexpr index_t L2          = CopyLengths{}.Get(I2);
        constexpr index_t read_per_d2 = math::integer_divide_ceil(L2, DataPerRead);
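        // e.g. (illustrative numbers, not from this header): L2 = 7 and
        // DataPerRead = 4 give read_per_d2 = 2, so each d1-line writes
        // 2 * 4 = 8 elements and the assert below requires DstDesc stride1 >= 8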

        static_assert(read_per_d2 * DataPerRead <= DstDesc{}.GetStride(I1),
                      "wrong! out-of-bound write will contaminate next line!\n");
    }

    __device__ void Run(const Float* __restrict__ p_src, Float* __restrict__ p_dst) const
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        constexpr auto src_desc = SrcDesc{};
        constexpr auto dst_desc = DstDesc{};

        constexpr index_t L0 = CopyLengths{}.Get(I0);
        constexpr index_t L1 = CopyLengths{}.Get(I1);
        constexpr index_t L2 = CopyLengths{}.Get(I2);

        constexpr index_t read_per_d2 = math::integer_divide_ceil(L2, DataPerRead);

        constexpr auto ref_desc = make_ConstantTensorDescriptor(Sequence<L0, L1, read_per_d2>{});

        constexpr index_t NLoop = ref_desc.GetElementSize() / BlockSize;

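        // all threads in the block cooperatively sweep the flattened copy space:
        // each 1d index "is" is decomposed into (did[0], did[1], did[2]) using
        // ref_desc's strides, then remapped to src and dst offsets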
        auto f_copy = [&](index_t is) {
            index_t did[3];

            did[0] = is / ref_desc.GetStride(I0);

            is -= did[0] * ref_desc.GetStride(I0);

            did[1] = is / ref_desc.GetStride(I1);

            is -= did[1] * ref_desc.GetStride(I1);

            did[2] = is / ref_desc.GetStride(I2);

            const index_t src_index =
                src_desc.GetOffsetFromMultiIndex(did[0], did[1], did[2] * DataPerRead);
            const index_t dst_index =
                dst_desc.GetOffsetFromMultiIndex(did[0], did[1], did[2] * DataPerRead);

            *(reinterpret_cast<vector_t*>(p_dst + dst_index)) =
                *(reinterpret_cast<const vector_t*>(p_src + src_index));
        };

        for(index_t iloop = 0; iloop < NLoop; ++iloop)
        {
            index_t is = get_thread_local_1d_id() + iloop * BlockSize;

            f_copy(is);
        }

        constexpr bool has_tail = (ref_desc.GetElementSize() > NLoop * BlockSize);

        if(has_tail)
        {
            index_t is = get_thread_local_1d_id() + NLoop * BlockSize;

            if(is < ref_desc.GetElementSize())
            {
                f_copy(is);
            }
        }
    }
};
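
// Illustrative usage (a minimal sketch; the shapes, block size and pointer
// names below are assumptions for the example, not values mandated by this
// header):
//
//   constexpr auto desc = make_ConstantTensorDescriptor(Sequence<2, 8, 64>{});
//
//   using BlockCopy = Blockwise3dTensorCopy1<256,                // BlockSize
//                                            float,
//                                            decltype(desc),     // SrcDesc
//                                            decltype(desc),     // DstDesc
//                                            Sequence<2, 8, 64>, // CopyLengths
//                                            4>;                 // DataPerRead
//
//   BlockCopy{}.Run(p_block_src, p_block_dst);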

// starting point needs to be aligned to float4, float2 or float, depending on DataPerRead
// stride2 needs to be 1 for both source and destination
template <index_t BlockSize,
          class Float,
          class SrcDesc,
          class DstDesc,
          class CopyLengths,
          class ThreadPerDims,
          index_t DataPerRead>
struct Blockwise3dTensorCopy3
{
    using vector_t = typename vector_type<Float, DataPerRead>::MemoryType;

    index_t mSrcMyThreadOffset;
    index_t mDstMyThreadOffset;

    __device__ Blockwise3dTensorCopy3()
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        static_assert(DataPerRead == 1 ||
                          (SrcDesc{}.GetStride(I2) == 1 && DstDesc{}.GetStride(I2) == 1),
                      "wrong! only support stride3 == 1 if DataPerRead > 1!\n");

        static_assert(DataPerRead == 1 || DataPerRead == 2 || DataPerRead == 4,
                      "wrong! only support DataPerRead == 1, 2 or 4!\n");

        static_assert(
            SrcDesc{}.GetStride(I1) % DataPerRead == 0 &&
                DstDesc{}.GetStride(I1) % DataPerRead == 0,
            "wrong! src and dst stride1 should be multiple of DataPerRead to keep alignment");

        constexpr index_t L0 = CopyLengths{}.Get(I0);
        constexpr index_t L1 = CopyLengths{}.Get(I1);
        constexpr index_t L2 = CopyLengths{}.Get(I2);

        constexpr index_t thread_per_d0 = ThreadPerDims{}.Get(I0);
        constexpr index_t thread_per_d1 = ThreadPerDims{}.Get(I1);
        constexpr index_t thread_per_d2 = ThreadPerDims{}.Get(I2);

        // we allow out-of-bound read from src in D2 dimension,
        //   but we need to make sure dst stride1 is big enough,
        //   so that the out-of-bound write won't contaminate the next line in dst
        constexpr index_t nloop_d2 = math::integer_divide_ceil(L2, thread_per_d2 * DataPerRead);
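        // e.g. (illustrative numbers, not from this header): L2 = 100,
        // thread_per_d2 = 16 and DataPerRead = 4 give nloop_d2 = 2, so each
        // d1-line touches 2 * 16 * 4 = 128 elements and the assert below
        // requires DstDesc stride1 >= 128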

        static_assert(nloop_d2 * thread_per_d2 * DataPerRead <= DstDesc{}.GetStride(I1),
                      "wrong! out-of-bound write will contaminate next line!\n");

        static_assert(L0 % thread_per_d0 == 0 && L1 % thread_per_d1 == 0,
                      "wrong! L0 and L1 should be divided evenly by thread_per_d0 and thread_per_d1!\n");

        static_assert(BlockSize >= thread_per_d0 * thread_per_d1 * thread_per_d2,
                      "wrrong! BlockSize is not big enough for ThreadPerDims!");

        constexpr index_t num_active_thread =
            accumulate_on_sequence(ThreadPerDims{}, math::multiplies<index_t>{}, Number<1>{});

        if(BlockSize > num_active_thread)
        {
            if(get_thread_local_1d_id() >= num_active_thread)
            {
                return;
            }
        }

        constexpr auto thread_cluster_desc = make_ConstantTensorDescriptor(ThreadPerDims{});
        const auto thread_multi_id =
            thread_cluster_desc.GetMultiIndexFrom1dIndex(get_thread_local_1d_id());

        mSrcMyThreadOffset = SrcDesc{}.GetOffsetFromMultiIndex(
            thread_multi_id[0], thread_multi_id[1], thread_multi_id[2] * DataPerRead);

        mDstMyThreadOffset = DstDesc{}.GetOffsetFromMultiIndex(
            thread_multi_id[0], thread_multi_id[1], thread_multi_id[2] * DataPerRead);
    }

    __device__ void Run(const Float* __restrict__ p_src, Float* __restrict__ p_dst) const
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        constexpr index_t L0 = CopyLengths{}.Get(I0);
        constexpr index_t L1 = CopyLengths{}.Get(I1);
        constexpr index_t L2 = CopyLengths{}.Get(I2);

        constexpr index_t thread_per_d0 = ThreadPerDims{}.Get(I0);
        constexpr index_t thread_per_d1 = ThreadPerDims{}.Get(I1);
        constexpr index_t thread_per_d2 = ThreadPerDims{}.Get(I2);

        constexpr index_t num_active_thread = thread_per_d0 * thread_per_d1 * thread_per_d2;

        if(BlockSize > num_active_thread)
        {
            if(get_thread_local_1d_id() >= num_active_thread)
            {
                return;
            }
        }

        constexpr index_t nloop_d0 = L0 / thread_per_d0;
        constexpr index_t nloop_d1 = L1 / thread_per_d1;
        constexpr index_t nloop_d2 = math::integer_divide_ceil(L2, thread_per_d2 * DataPerRead);
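        // in the loops below, each thread walks an (nloop_d0 x nloop_d1 x
        // nloop_d2) grid of vector_t reads; successive iterations are strided
        // by the thread cluster shape, so the whole block tiles CopyLengths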

#pragma unroll
        for(index_t iloop_d0 = 0; iloop_d0 < nloop_d0; ++iloop_d0)
        {
#pragma unroll
            for(index_t iloop_d1 = 0; iloop_d1 < nloop_d1; ++iloop_d1)
            {
#pragma unroll
                for(index_t iloop_d2 = 0; iloop_d2 < nloop_d2; ++iloop_d2)
                {
                    const index_t src_offset =
                        SrcDesc{}.GetOffsetFromMultiIndex(iloop_d0 * thread_per_d0,
                                                          iloop_d1 * thread_per_d1,
                                                          iloop_d2 * thread_per_d2 * DataPerRead);

                    const index_t dst_offset =
                        DstDesc{}.GetOffsetFromMultiIndex(iloop_d0 * thread_per_d0,
                                                          iloop_d1 * thread_per_d1,
                                                          iloop_d2 * thread_per_d2 * DataPerRead);

                    *(reinterpret_cast<vector_t*>(&p_dst[dst_offset + mDstMyThreadOffset])) = *(
                        reinterpret_cast<const vector_t*>(&p_src[src_offset + mSrcMyThreadOffset]));
                }
            }
        }
    }

    __device__ static constexpr index_t GetRegisterClipboardSize()
    {
        static_assert(is_same<Float, float>::value, "wrong! only support float!\n");

        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        constexpr index_t L0 = CopyLengths{}.Get(I0);
        constexpr index_t L1 = CopyLengths{}.Get(I1);
        constexpr index_t L2 = CopyLengths{}.Get(I2);

        constexpr index_t thread_per_d0 = ThreadPerDims{}.Get(I0);
        constexpr index_t thread_per_d1 = ThreadPerDims{}.Get(I1);
        constexpr index_t thread_per_d2 = ThreadPerDims{}.Get(I2);

        constexpr index_t nloop_d0 = L0 / thread_per_d0;
        constexpr index_t nloop_d1 = L1 / thread_per_d1;
        constexpr index_t nloop_d2 = math::integer_divide_ceil(L2, thread_per_d2 * DataPerRead);
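        // per thread, the clipboard holds one vector_t (DataPerRead floats) for
        // every (iloop_d0, iloop_d1, iloop_d2) iteration, hence the size below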

        return DataPerRead * nloop_d0 * nloop_d1 * nloop_d2;
    }

    __device__ void RunLoadRegisterClipboard(const Float* __restrict__ p_src,
                                             Float* __restrict__ p_clipboard) const
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        constexpr index_t L0 = CopyLengths{}.Get(I0);
        constexpr index_t L1 = CopyLengths{}.Get(I1);
        constexpr index_t L2 = CopyLengths{}.Get(I2);

        constexpr index_t thread_per_d0 = ThreadPerDims{}.Get(I0);
        constexpr index_t thread_per_d1 = ThreadPerDims{}.Get(I1);
        constexpr index_t thread_per_d2 = ThreadPerDims{}.Get(I2);

        constexpr index_t num_active_thread = thread_per_d0 * thread_per_d1 * thread_per_d2;

        if(BlockSize > num_active_thread)
        {
            if(get_thread_local_1d_id() >= num_active_thread)
            {
                return;
            }
        }

        constexpr index_t nloop_d0 = L0 / thread_per_d0;
        constexpr index_t nloop_d1 = L1 / thread_per_d1;
        constexpr index_t nloop_d2 = math::integer_divide_ceil(L2, thread_per_d2 * DataPerRead);

        constexpr auto clipboard_desc =
            make_ConstantTensorDescriptor(Sequence<nloop_d0, nloop_d1, nloop_d2 * DataPerRead>{});

#pragma unroll
        for(index_t iloop_d0 = 0; iloop_d0 < nloop_d0; ++iloop_d0)
        {
#pragma unroll
            for(index_t iloop_d1 = 0; iloop_d1 < nloop_d1; ++iloop_d1)
            {
#pragma unroll
                for(index_t iloop_d2 = 0; iloop_d2 < nloop_d2; ++iloop_d2)
                {
                    const index_t src_offset =
                        SrcDesc{}.GetOffsetFromMultiIndex(iloop_d0 * thread_per_d0,
                                                          iloop_d1 * thread_per_d1,
                                                          iloop_d2 * thread_per_d2 * DataPerRead);

                    const index_t clipboard_offset = clipboard_desc.GetOffsetFromMultiIndex(
                        iloop_d0, iloop_d1, iloop_d2 * DataPerRead);

                    *(reinterpret_cast<vector_t*>(&p_clipboard[clipboard_offset])) = *(
                        reinterpret_cast<const vector_t*>(&p_src[src_offset + mSrcMyThreadOffset]));
                }
            }
        }
    }

    __device__ void RunStoreRegisterClipboard(const Float* __restrict__ p_clipboard,
                                              Float* __restrict__ p_dst) const
    {
        constexpr auto I0 = Number<0>{};
        constexpr auto I1 = Number<1>{};
        constexpr auto I2 = Number<2>{};

        constexpr index_t L0 = CopyLengths{}.Get(I0);
        constexpr index_t L1 = CopyLengths{}.Get(I1);
        constexpr index_t L2 = CopyLengths{}.Get(I2);

        constexpr index_t thread_per_d0 = ThreadPerDims{}.Get(I0);
        constexpr index_t thread_per_d1 = ThreadPerDims{}.Get(I1);
        constexpr index_t thread_per_d2 = ThreadPerDims{}.Get(I2);

        constexpr index_t num_active_thread = thread_per_d0 * thread_per_d1 * thread_per_d2;

        if(BlockSize > num_active_thread)
        {
            if(get_thread_local_1d_id() >= num_active_thread)
            {
                return;
            }
        }

        constexpr index_t nloop_d0 = L0 / thread_per_d0;
        constexpr index_t nloop_d1 = L1 / thread_per_d1;
        constexpr index_t nloop_d2 = math::integer_divide_ceil(L2, thread_per_d2 * DataPerRead);

        constexpr auto clipboard_desc =
            make_ConstantTensorDescriptor(Sequence<nloop_d0, nloop_d1, nloop_d2 * DataPerRead>{});

#pragma unroll
        for(index_t iloop_d0 = 0; iloop_d0 < nloop_d0; ++iloop_d0)
        {
#pragma unroll
            for(index_t iloop_d1 = 0; iloop_d1 < nloop_d1; ++iloop_d1)
            {
#pragma unroll
                for(index_t iloop_d2 = 0; iloop_d2 < nloop_d2; ++iloop_d2)
                {
                    const index_t clipboard_offset = clipboard_desc.GetOffsetFromMultiIndex(
                        iloop_d0, iloop_d1, iloop_d2 * DataPerRead);

                    const index_t dst_offset =
                        DstDesc{}.GetOffsetFromMultiIndex(iloop_d0 * thread_per_d0,
                                                          iloop_d1 * thread_per_d1,
                                                          iloop_d2 * thread_per_d2 * DataPerRead);

                    *(reinterpret_cast<vector_t*>(&p_dst[dst_offset + mDstMyThreadOffset])) =
                        *(reinterpret_cast<const vector_t*>(&p_clipboard[clipboard_offset]));
                }
            }
        }
    }
};
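
// Illustrative usage of the register-clipboard path (a minimal sketch; the
// shapes, thread distribution and pointer names below are assumptions for the
// example, not values mandated by this header):
//
//   constexpr auto desc = make_ConstantTensorDescriptor(Sequence<2, 8, 128>{});
//
//   using BlockCopy = Blockwise3dTensorCopy3<256,                 // BlockSize
//                                            float,
//                                            decltype(desc),      // SrcDesc
//                                            decltype(desc),      // DstDesc
//                                            Sequence<2, 8, 128>, // CopyLengths
//                                            Sequence<2, 4, 32>,  // ThreadPerDims, 2*4*32 = 256
//                                            4>;                  // DataPerRead
//
//   BlockCopy block_copy;
//   float p_clipboard[BlockCopy::GetRegisterClipboardSize()];
//
//   block_copy.RunLoadRegisterClipboard(p_src, p_clipboard);  // global -> registers
//   // ... other work can be scheduled here to hide the load latency ...
//   block_copy.RunStoreRegisterClipboard(p_clipboard, p_dst); // registers -> dst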

} // namespace ck

#endif