"vscode:/vscode.git/clone" did not exist on "017d9f15151ce571a5f4fd381699c72a872636ec"
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#include <vector>
#include <iostream>
#include <numeric>
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <unordered_set>

#include "ck_tile/core.hpp"
#include "ck_tile/ops/reduce.hpp"

#ifndef TEST_TILE_REDUCE_VERBOSE
#define TEST_TILE_REDUCE_VERBOSE 1
#endif

#define HIP_CALL(call)                                                                \
    do                                                                                \
    {                                                                                 \
        hipError_t err = call;                                                        \
        if(err != hipSuccess)                                                         \
        {                                                                             \
            printf("[hiperror](%d) fail to call %s\n", static_cast<int>(err), #call); \
            exit(EXIT_FAILURE);                                                       \
        }                                                                             \
    } while(0)

#define BLOCK_SIZE 256

template <int Rows, int Cols, typename DataType, int BytesPerIssue = 16>
__global__ void reduce_row(DataType* p_src, DataType* p_dst)
{
    using namespace ck_tile;

    // some constexpr vars
    constexpr index_t vec = BytesPerIssue / sizeof(DataType);
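    // Thread layout derived below: each row is covered by col_lanes lanes issuing 'vec'
    // contiguous elements each, a warp therefore spans row_lanes rows, and the block
    // (num_warps warps) handles num_warps * row_lanes rows per repeat, with row_repeat
    // repeats per thread covering all Rows.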
    static_assert(Cols % vec == 0);
    constexpr index_t col_lanes = Cols / vec;
    constexpr index_t warp_size = ck_tile::get_warp_size();
    static_assert(warp_size % col_lanes == 0);
    constexpr index_t row_lanes = warp_size / col_lanes;
    constexpr index_t num_warps = BLOCK_SIZE / warp_size;
    static_assert(Rows % (num_warps * row_lanes) == 0);
    constexpr index_t row_repeat = Rows / (num_warps * row_lanes);

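    // Build a tile window over the whole Rows x Cols source tensor in global memory,
    // distributed across the block according to the layout above.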
    auto src_tile = [&]() {
        constexpr auto src_dist = make_static_tile_distribution(
            tile_distribution_encoding<
                sequence<1>,
                tuple<sequence<row_repeat, num_warps, row_lanes>, sequence<col_lanes, vec>>,
                tuple<sequence<1>, sequence<1, 2>>,
                tuple<sequence<1>, sequence<2, 0>>,
                sequence<1, 2>,
                sequence<0, 1>>{});

        auto src_view =
            make_naive_tensor_view<address_space_enum::global>(p_src,
                                                               make_tuple(Rows, Cols),
                                                               make_tuple(Cols, 1),
                                                               number<vec>{}, // alignment
                                                               number<1>{});
        return make_tile_window(
            src_view, make_tuple(number<Rows>{}, number<Cols>{}), {0, 0}, src_dist);
    }();

    constexpr auto dst_dist = make_static_tile_distribution(
        tile_distribution_encoding<
            sequence<col_lanes>, // -> replicate here, hence we can figure out the offset
            tuple<sequence<row_repeat, num_warps, row_lanes>, sequence<1> /* only 1 per row*/>,
            tuple<sequence<1>, sequence<1, 0>>,
            tuple<sequence<1>, sequence<2, 0>>,
            sequence<1, 2>,
            sequence<0, 0>>{});
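    // dst_dist maps one output element per row; the col_lanes lanes that cooperate on a row
    // are declared as replication, so a single lane per row can commit the final value.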

    auto dst_tile = [&]() {
        auto dst_view =
            make_naive_tensor_view<address_space_enum::global>(p_dst,
                                                               make_tuple(Rows, 1),
                                                               make_tuple(1, 1),
                                                               number<1>{}, // alignment
                                                               number<1>{});
        return make_tile_window(
            dst_view, make_tuple(number<Rows>{}, number<1>{}), {0, 0}, dst_dist);
    }();

    auto data = load_tile(src_tile);

    const auto f_max = [](auto e0, auto e1) { return max(e0, e1); };

    // Note: the returned tensor keeps the replicated dimension (so it is usually still 2-D).
    //       This layout is what the following block_tile_reduce_sync() expects for the
    //       cross-thread reduction.
    auto r =
        block_tile_reduce<DataType>(data, sequence<1>{}, f_max, -numeric<DataType>::infinity());

    // further reduce across the threads (lanes) that cooperate on the same row
    block_tile_reduce_sync(r, f_max, bool_constant<false>{});

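    // Only the first lane of each row group writes the result, matching the replication
    // declared in dst_dist.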
    if(threadIdx.x % col_lanes == 0)
    {
        auto o                = make_static_distributed_tensor<DataType>(dst_dist);
        o.get_thread_buffer() = r.get_thread_buffer();
        store_tile(dst_tile, o);
    }
}

template <int Rows, int Cols, typename DataType, int BytesPerIssue = 16>
bool test_tile_reduce()
{
    std::srand(std::time(nullptr));
    DataType* src = reinterpret_cast<DataType*>(malloc(Rows * Cols * sizeof(DataType)));
    DataType* dst = reinterpret_cast<DataType*>(malloc(Rows * sizeof(DataType)));

    for(auto i = 0; i < Rows * Cols; i++)
    {
        float v = static_cast<float>(std::rand() % 2000 - 1000) / 1000.f;
        src[i]  = ck_tile::type_convert<DataType>(v);
    }

    void* dev_src;
    void* dev_dst;
    HIP_CALL(hipMalloc(&dev_src, Rows * Cols * sizeof(DataType)));
    HIP_CALL(hipMalloc(&dev_dst, Rows * sizeof(DataType)));

    HIP_CALL(hipMemcpy(dev_src, src, Rows * Cols * sizeof(DataType), hipMemcpyHostToDevice));

    constexpr int bdim = BLOCK_SIZE;
    int gdim           = 1;
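    // a single workgroup is enough: the kernel's tile window spans the whole Rows x Cols tensor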
    reduce_row<Rows, Cols, DataType, BytesPerIssue><<<gdim, bdim>>>(
        reinterpret_cast<DataType*>(dev_src), reinterpret_cast<DataType*>(dev_dst));

    HIP_CALL(hipMemcpy(dst, dev_dst, Rows * sizeof(DataType), hipMemcpyDeviceToHost));

    int err_cnt = 0;

    for(int i_r = 0; i_r < Rows; i_r++)
    {
        auto row_max = -ck_tile::numeric<float>::infinity();
        for(int i_c = 0; i_c < Cols; i_c++)
        {
            int idx = i_r * Cols + i_c;
            float v = ck_tile::type_convert<float>(src[idx]);
            row_max = row_max > v ? row_max : v;
#if TEST_TILE_REDUCE_VERBOSE
            printf("%.3f ", v);
#endif
        }
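        // bitwise comparison is valid here: max() only selects one of the inputs, so for float
        // data the device result is expected to match the host reference exactly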
        {
            uint32_t ref = ck_tile::bit_cast<uint32_t>(row_max);
            uint32_t out = ck_tile::bit_cast<uint32_t>(dst[i_r]);
            if(ref != out)
                err_cnt++;
        }
#if TEST_TILE_REDUCE_VERBOSE
        printf(" -> %.3f (%.3f)\n", ck_tile::type_convert<float>(dst[i_r]), row_max);
#endif
    }
#if TEST_TILE_REDUCE_VERBOSE
    printf("\n");
#endif

    free(src);
    free(dst);
    HIP_CALL(hipFree(dev_src));
    HIP_CALL(hipFree(dev_dst));
    return err_cnt == 0;
}

int main()
{
    bool r = true;
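    // two configurations: 32x64 float with the default 16-byte issue (vec = 4) and
    // 32x16 float with a 4-byte issue (vec = 1)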
    r &= test_tile_reduce<32, 64, float>();
    r &= test_tile_reduce<32, 16, float, 4>();

    return r ? 0 : -1;
}