// SPDX-License-Identifier: MIT
// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once

#include "ck/ck.hpp"

#include "ck/utility/number.hpp"
#include "ck/utility/tuple.hpp"
#include "ck/utility/tuple_helper.hpp"
#include "ck/utility/sequence.hpp"
#include "ck/utility/sequence_helper.hpp"
#include "ck/utility/is_detected.hpp"

#include "ck/tensor_description/tensor_descriptor.hpp"
#include "ck/tensor_description/tensor_descriptor_helper.hpp"
#include "ck/tensor_description/multi_index_transform_helper.hpp"
18
#include "ck/tensor_operation/gpu/device/matrix_padder.hpp"
19

20
21
// Disable from doxygen docs generation
/// @cond INTERNAL
22
23
namespace ck {
namespace wrapper {
24
/// @endcond
25
26

// Disable from doxygen docs generation
27
/// @cond INTERNAL
28
// forward declaration
29
// Forward declaration of the wrapper Layout type (defined elsewhere);
// needed here because the helpers below construct and accept Layouts.
template <typename Shape, typename UnrolledDescriptorType>
struct Layout;

// Detection alias: well-formed only for types exposing an IsTuple() member.
// Used with is_detected to distinguish tuples from scalar dimensions.
template <typename T>
using is_tuple = decltype(std::declval<T&>().IsTuple());
34
35

namespace {
36
namespace detail {
37
38
39
40
41
42
/**
 * \brief Generate packed (column-major) strides if not passed
 *
 * \param shape Tensor shape.
 * \return Generated column-major strides.
 */
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
template <typename... Ts>
__host__ __device__ constexpr static auto
GenerateColumnMajorPackedStrides(const Tuple<Ts...>& shape)
{
    const auto unrolled_shape = UnrollNestedTuple(shape);
    return generate_tuple(
        [&](auto i) {
            if constexpr(i.value == 0)
            {
                return Number<1>{};
            }
            else
            {
                return TupleReduce<Number<0>{}.value, i.value>([](auto x, auto y) { return x * y; },
                                                               unrolled_shape);
            }
        },
        Number<decltype(unrolled_shape)::Size()>{});
}

/**
 * \brief Create naive tensor descriptor from nested shape.
 *
 * \param shape Tensor shape.
 * \param strides Tensor strides.
 * \return Unrolled descriptor
 */
70
template <typename LayoutShape, typename LayoutStrides>
71
72
__host__ __device__ constexpr auto MakeUnrolledDescriptor(const LayoutShape& shape,
                                                          const LayoutStrides& strides)
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
{
    const auto unrolled_shape = UnrollNestedTuple(shape);
    if constexpr(is_same_v<LayoutStrides, Tuple<>>)
    {
        // if not passed, then generate
        const auto unrolled_strides = GenerateColumnMajorPackedStrides(unrolled_shape);
        static_assert(unrolled_shape.Size() == unrolled_strides.Size(),
                      "Size of strides and shape are not consistent.");
        return make_naive_tensor_descriptor(unrolled_shape, unrolled_strides);
    }
    else
    {
        const auto unrolled_strides = UnrollNestedTuple(strides);
        static_assert(unrolled_shape.Size() == unrolled_strides.Size(),
                      "Size of strides and shape are not consistent.");
        return make_naive_tensor_descriptor(unrolled_shape, unrolled_strides);
    }
}
91
} // namespace detail
92
93
} // namespace

/// @endcond

// make_*
/**
 * \brief Make layout function.
 *
 * \tparam Shape Shape for layout.
 * \tparam Strides Strides for layout.
 * \return Constructed layout.
 */
template <typename Shape, typename Strides>
105
__host__ __device__ constexpr auto make_layout(const Shape& shape, const Strides& strides)
106
{
107
108
109
    using UnrolledDescriptorType = decltype(detail::MakeUnrolledDescriptor(Shape{}, Strides{}));
    return Layout<Shape, UnrolledDescriptorType>(shape,
                                                 detail::MakeUnrolledDescriptor(shape, strides));
110
111
112
113
114
115
116
117
118
119
}

/**
 * \brief Make layout function with packed strides
 *        (column-major).
 *
 * \tparam Shape Shape for layout.
 * \return Constructed layout.
 */
template <typename Shape>
120
__host__ __device__ constexpr auto make_layout(const Shape& shape)
121
{
122
123
124
    using UnrolledDescriptorType = decltype(detail::MakeUnrolledDescriptor(Shape{}, Tuple<>{}));
    return Layout<Shape, UnrolledDescriptorType>(shape,
                                                 detail::MakeUnrolledDescriptor(shape, Tuple<>{}));
125
126
127
}
// Layout helpers
// get
128
129
/**
 * \private
130
131
132
133
 * \brief Get dim.
 *
 * \param dim Dimension.
 * \return Returned the same dimension.
134
135
136
137
138
139
140
 */
template <typename T>
__host__ __device__ T constexpr get(const T& dim)
{
    return dim;
}

/**
 * \brief Get element from tuple (Shape/Strides/Idxs).
 *
 * \tparam idx Index to lookup.
 * \param tuple Tuple to lookup.
 * \return Requested element.
 */
template <index_t idx, typename... Dims>
__host__ __device__ constexpr auto get(const Tuple<Dims...>& tuple)
{
    constexpr auto element_idx = Number<idx>{};
    return tuple.At(element_idx);
}

/**
 * \brief Get sub layout.
 *
 * Builds a new Layout for the nested (tuple) dimension at `idx` by freezing
 * every flattened dimension outside that sub shape in the underlying
 * descriptor and passing through the ones inside it.
 *
 * \tparam idx Index to lookup.
 * \param layout Layout to create sub layout.
 * \return Requested sub layout.
 */
template <index_t idx, typename Shape, typename UnrolledDesc>
__host__ __device__ constexpr auto get(const Layout<Shape, UnrolledDesc>& layout)
{
    const auto& shape    = layout.GetShape();
    const auto new_shape = get<idx>(shape);
    // A sub layout only makes sense for a nested (tuple) dimension.
    static_assert(is_detected<is_tuple, decltype(new_shape)>::value,
                  "Shape of sub layout must be tuple");

    // Flattened dimension counts: whole shape, selected sub shape, and the
    // flattened offset of the sub shape within the whole shape.
    constexpr auto old_shape_dims = decltype(UnrollNestedTuple(shape))::Size();
    constexpr auto new_shape_dims = decltype(UnrollNestedTuple(new_shape))::Size();
    constexpr auto shape_offset   = decltype(UnrollNestedTuple(TupleSlice<0, idx>(shape)))::Size();

    const auto unrolled_shape = UnrollNestedTuple(shape);
    const auto transforms     = generate_tuple(
        [&](auto i) {
            // Compare Idx with shape
            if constexpr(i < shape_offset || i >= shape_offset + new_shape_dims)
            {
                // Remove dimension
                return make_freeze_transform(Number<0>{});
            }
            else
            {
                return make_pass_through_transform(unrolled_shape.At(i));
            }
        },
        Number<old_shape_dims>{});

    // Each transform consumes exactly one lower (flattened) dimension.
    const auto lower_dims =
        generate_tuple([&](auto i) { return Sequence<i.value>{}; }, Number<old_shape_dims>{});
    // Frozen dims produce no upper dimension; kept dims are renumbered from 0.
    const auto upper_dims = generate_tuple(
        [&](auto i) {
            if constexpr(i < shape_offset || i >= shape_offset + new_shape_dims)
                return Sequence<>{};

            else
            {
                return Sequence<i.value - shape_offset>{};
            }
        },
        Number<old_shape_dims>{});

    const auto& flatten_desc = layout.GetUnrolledDescriptor();
    auto new_desc = transform_tensor_descriptor(flatten_desc, transforms, lower_dims, upper_dims);
    return Layout<decltype(new_shape), decltype(new_desc)>(new_shape, new_desc);
}

/**
 * \brief Hierarchical get.
 *
 * \tparam Idx First index to lookup.
 * \tparam Idxs Remaining indexes to lookup.
 * \param elem Element to lookup.
 * \return Requested element.
 */
template <index_t Idx, index_t... Idxs, typename T>
__host__ __device__ constexpr auto get(const T& elem)
{
    // Peel off the first index, then recurse into the selected element.
    return get<Idxs...>(get<Idx>(elem));
}

// size
222
223
/**
 * \private
224
225
226
227
 * \brief Get size.
 *
 * \param dim Size.
 * \return Returned the same size.
228
229
230
231
232
233
234
 */
template <typename T>
__host__ __device__ T constexpr size(const T& dim)
{
    return dim;
}

235
236
237
238
/**
 * \brief Length get (product if tuple).
 *
 * \tparam idx Index to lookup.
 * \param layout Layout to get Shape of.
 * \return Requested length.
 */
template <index_t idx, typename Shape, typename UnrolledDescriptorType>
__host__ __device__ constexpr auto size(const Layout<Shape, UnrolledDescriptorType>& layout)
{
    // Delegates to Layout::GetLength for the idx-th shape dimension.
    return layout.template GetLength<idx>();
}

/**
 * \brief Shape size (product of dims).
 *
 * \param shape Shape to lookup.
 * \return Requsted size.
 */
template <typename... ShapeDims>
255
__host__ __device__ constexpr auto size(const Tuple<ShapeDims...>& shape)
256
257
258
259
260
261
262
263
264
265
266
267
{
    const auto unrolled_shape = UnrollNestedTuple(shape);
    return TupleReduce<0, unrolled_shape.Size()>([](auto x, auto y) { return x * y; },
                                                 unrolled_shape);
}

/**
 * \brief Layout size (product of dims).
 *
 * \param layout Layout to calculate shape size.
 * \return Requested size.
 */
template <typename Shape, typename UnrolledDescriptorType>
__host__ __device__ constexpr auto size(const Layout<Shape, UnrolledDescriptorType>& layout)
{
    // Delegates to Layout::GetLengths().
    return layout.GetLengths();
}

/**
 * \brief Length get from tuple (product if tuple).
 *
 * \tparam idx Index to lookup.
 * \param tuple Tuple to lookup.
 * \return Requsted length.
 */
template <index_t idx, typename... Ts>
282
__host__ __device__ constexpr auto size(const Tuple<Ts...>& tuple)
283
284
285
286
287
288
289
{
    return size(tuple.At(Number<idx>{}));
}

/**
 * \brief Hierarchical size.
 *
 * \tparam Idx First index to lookup (to avoid empty Idxs).
 * \tparam Idxs Next indexes to lookup.
 * \param elem Element to lookup.
 * \return Requested element.
 */
template <index_t Idx, index_t... Idxs, typename T>
__host__ __device__ constexpr auto size(const T& elem)
{
    // Resolve the nested element first, then take its size.
    return size(get<Idx, Idxs...>(elem));
}

// rank
/**
 * \brief Get layout rank (num elements in shape).
 *
 * \param layout Layout to calculate rank.
 * \return Requested rank.
 */
template <typename Shape, typename UnrolledDescriptorType>
__host__ __device__ constexpr auto
rank([[maybe_unused]] const Layout<Shape, UnrolledDescriptorType>& layout)
{
    // Rank is the number of top-level entries in the shape tuple.
    return Shape::Size();
}

/**
 * \brief Get tuple rank (num elements in tuple).
 *        Return 1 if scalar passed.
 *
 * \param tuple Tuple to calculate rank.
 * \return Requested rank.
 */
template <typename... Dims>
__host__ __device__ constexpr auto rank([[maybe_unused]] const Tuple<Dims...>& tuple)
{
    // Number of top-level elements; nested tuples count as one each.
    return Tuple<Dims...>::Size();
}

/**
 * \private
 * \brief Rank for scalar
 *
 * \param dim Dimension scalar.
 * \return Returned 1.
 */
template <index_t IDim>
__host__ __device__ constexpr index_t rank([[maybe_unused]] const Number<IDim>& dim)
{
    // Compile-time scalars count as rank-1.
    return 1;
}

/**
 * \private
343
344
345
346
 * \brief Rank for scalar
 *
 * \param dim Dimension scalar.
 * \return Returned 1.
347
 */
348
__host__ __device__ constexpr index_t rank([[maybe_unused]] const index_t& dim) { return 1; }
/**
 * \brief Hierarchical rank.
 *
 * \tparam Idxs Indexes to lookup.
 * \param elem Element to lookup.
 * \return Requested rank.
 */
template <index_t... Idxs, typename T>
__host__ __device__ constexpr auto rank(const T& elem)
{
    // Resolve the nested element first, then take its rank.
    return rank(get<Idxs...>(elem));
}

// depth
/**
 * \brief Get depth of the layout shape (return 0 if scalar).
 *
 * \param layout Layout to calculate depth.
 * \return Requsted depth.
 */
370
371
template <typename Shape, typename UnrolledDescriptorType>
__host__ __device__ constexpr auto depth(const Layout<Shape, UnrolledDescriptorType>& layout)
372
{
373
374
    const auto& shape = layout.GetShape();
    return TupleDepth(shape);
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
}

/**
 * \brief Get depth of the tuple. (return 0 if scalar)
 *
 * \param tuple Tuple to calculate depth.
 * \return Requested depth.
 */
template <typename... Dims>
__host__ __device__ constexpr auto depth(const Tuple<Dims...>& tuple)
{
    // Delegates to TupleDepth (nesting level of the tuple).
    return TupleDepth(tuple);
}

/**
 * \private
 * \brief Depth for scalar
 *
 * \param dim Scalar.
 * \return Returned 0.
 */
template <index_t IDim>
__host__ __device__ constexpr index_t depth([[maybe_unused]] const Number<IDim>& dim)
{
    // Compile-time scalars have no nesting.
    return 0;
}

/**
 * \private
404
405
406
407
 * \brief Depth for scalar
 *
 * \param dim Scalar.
 * \return Returned 0.
408
 */
409
__host__ __device__ constexpr index_t depth([[maybe_unused]] const index_t& dim) { return 0; }

/**
 * \brief Hierarchical depth.
 *
 * \tparam Idxs Indexes to lookup.
 * \param elem Element to lookup.
 * \return Requested depth.
 */
template <index_t... Idxs, typename T>
__host__ __device__ constexpr auto depth(const T& elem)
{
    // Resolve the nested element first, then take its depth.
    return depth(get<Idxs...>(elem));
}

/**
 * \brief Get Layout shape.
 *
 * \param layout Layout to get shape from.
 * \return Requested shape (const reference, no copy).
 */
template <typename LayoutType>
__host__ __device__ constexpr const auto& shape(const LayoutType& layout)
{
    return layout.GetShape();
}

// pad
/**
 * \brief Pad layout shapes to be adjusted to tile lengths.
 *
 *
 * \param layout Layout to pad.
 * \param tile_lengths Tile lengths to align layout shape.
 * \return Padded layout.
 */
template <typename Shape, typename UnrolledDesc, typename TileLengths>
__host__ __device__ constexpr auto pad(const Layout<Shape, UnrolledDesc>& layout,
                                       const TileLengths& tile_lengths)
{
    auto& flat_desc = layout.GetUnrolledDescriptor();
    // Sequence of ones: request padding on every dimension.
    constexpr auto pad_all_dims_seq =
        generate_sequence_v2([](auto) { return Number<1>{}; }, Number<Shape::Size()>{});
    // Descriptor with lengths adjusted to the tile lengths (see PadTensorDescriptor).
    auto padded_desc =
        tensor_operation::device::PadTensorDescriptor(flat_desc, tile_lengths, pad_all_dims_seq);
    // Read the padded lengths back to build the new shape.
    const auto padded_shape = generate_tuple(
        [&](auto i) { return padded_desc.GetLength(Number<i>{}); }, Number<TileLengths::Size()>{});
    return Layout<decltype(padded_shape), decltype(padded_desc)>(padded_shape, padded_desc);
}

// unmerge
/**
 * \brief Unmerge selected dim in layout.
 *
 * \tparam Idx Index to dimension being unmerged.
 * \param layout Layout to unmerge.
 * \param new_lengths Dimensions into which the indicated dimension will be divided.
 * \param new_indexes Indexes to shuffle dims. Dims for unmerged dim should be nested.
 * \return Unmerged layout.
 */
template <index_t Idx, typename Shape, typename UnrolledDesc, typename NewLengths, typename NewIdxs>
__host__ __device__ constexpr auto unmerge(const Layout<Shape, UnrolledDesc>& layout,
                                           const NewLengths& new_lengths,
                                           [[maybe_unused]] const NewIdxs& new_indexes)
{
    const auto& layout_shape = shape(layout);
    auto& unrolled_desc      = layout.GetUnrolledDescriptor();
    constexpr auto dims      = Shape::Size();
    // Generate transforms: unmerge the selected dim, pass the rest through.
    const auto transforms = generate_tuple(
        [&](auto i) {
            if constexpr(i == Idx)
            {
                return make_unmerge_transform(new_lengths);
            }
            else
            {
                return make_pass_through_transform(layout_shape.At(i));
            }
        },
        Number<dims>{});

    // Each transform consumes exactly one lower (original) dimension.
    constexpr auto lower_dims =
        generate_tuple([&](auto i) { return Sequence<i.value>{}; }, Number<dims>{});
    // Upper dims come from NewIdxs: a nested tuple entry marks the unmerged
    // dim's output positions; a plain index maps a pass-through dim.
    constexpr auto upper_dims = generate_tuple(
        [&](auto i) {
            if constexpr(is_detected<is_tuple, tuple_element_t<i.value, NewIdxs>>::value)
            {
                constexpr auto idxs_tuple = tuple_element_t<i.value, NewIdxs>{};
                return to_sequence(idxs_tuple);
            }
            else
            {
                constexpr index_t index = tuple_element_t<i.value, NewIdxs>{};
                return Sequence<index>{};
            }
        },
        Number<dims>{});

    const auto unmerged_desc =
        transform_tensor_descriptor(unrolled_desc, transforms, lower_dims, upper_dims);
    // Rebuild the shape from the transformed descriptor's visible lengths.
    const auto unmerged_shape =
        generate_tuple([&](auto i) { return unmerged_desc.GetLength(Number<i>{}); },
                       Number<decltype(unmerged_desc)::GetNumOfVisibleDimension()>{});

    // Create layout
    return Layout<decltype(unmerged_shape), decltype(unmerged_desc)>(unmerged_shape, unmerged_desc);
}

} // namespace wrapper
} // namespace ck