// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
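// This example computes a NORM2 reduction over the two innermost dimensions of a rank-5 tensor
// using two blockwise device reduction calls: the first call reduces the innermost dimension into
// a rank-4 intermediate tensor, and the second call reduces the next dimension to produce the
// final rank-3 result.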

#include <iostream>
#include <numeric>
#include <sstream>
#include <initializer_list>
#include <cstdlib>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/utility/reduction_enums.hpp"
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"
#include "ck/tensor_operation/gpu/device/device_reduce_multiblock.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/host_common_util.hpp"
#include "ck/library/utility/host_reduction.hpp"

using namespace ck;
using namespace ck::tensor_operation::device;

using InOutDataType = ck::half_t;
using AccDataType   = float;

constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::NORM2;
constexpr bool PropagateNan         = true;
constexpr bool OutputIndex          = false;

using ReduceOperation = typename reduce_binary_operator<ReduceOpId>::opType;
using InElementwiseOperation =
    typename reduce_unary_operator<ReduceOpId, true, true>::InElementwiseOperation;
using AccElementwiseOperation =
    typename reduce_unary_operator<ReduceOpId, true, true>::AccElementwiseOperation;

using PassThroughOp = tensor_operation::element_wise::PassThrough;
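
// For NORM2, the input elementwise op squares each element before accumulation and the
// accumulation elementwise op takes the square root of the accumulated sum; PassThrough is used
// wherever no transform should be applied.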

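// First reduction: rank-5 input -> rank-4 intermediate, reducing a single dimension. The input
// elementwise op (square for NORM2) is applied to the loaded values, and no final op is applied
// (PassThrough). The trailing integer arguments are the blockwise tuning parameters (block size,
// thread cluster and slice sizes, vectorization).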
using DeviceReduceInstance_1 = DeviceReduceMultiBlock<InOutDataType,
                                                      AccDataType,
                                                      InOutDataType,
                                                      5, // Rank
                                                      1, // NumReduceDim
                                                      ReduceOperation,
                                                      InElementwiseOperation,
                                                      PassThroughOp,
                                                      InMemoryDataOperationEnum::Set,
                                                      PropagateNan,
                                                      OutputIndex,
                                                      false, // HaveIndexInputIfOutputIndex
                                                      256,
                                                      32,
                                                      8,
                                                      1,
                                                      1,
                                                      1, // vector dim
                                                      1,
                                                      1>;

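// Second reduction: rank-4 intermediate -> rank-3 output, reducing a single dimension. Input
// values pass through unchanged, and the accumulation elementwise op (sqrt for NORM2) is applied
// to the accumulated result.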
using DeviceReduceInstance_2 = DeviceReduceMultiBlock<InOutDataType,
                                                      AccDataType,
                                                      InOutDataType,
                                                      4, // Rank
                                                      1, // NumReduceDim
                                                      ReduceOperation,
                                                      PassThroughOp,
                                                      AccElementwiseOperation,
                                                      InMemoryDataOperationEnum::Set,
                                                      PropagateNan,
                                                      OutputIndex,
                                                      false, // HaveIndexInputIfOutputIndex
                                                      256,
                                                      128,
                                                      2,
                                                      1,
                                                      1,
                                                      1, // vector dim
                                                      1,
                                                      1>;

static bool do_verify;
static int init_method;
static float alpha;
static float beta;
static bool time_kernel;

int main(int argc, char* argv[])
{
    // reduce dimensions for the two device reduction calls: the first call reduces dim 4 of the
    // rank-5 input, the second reduces dim 3 of the resulting rank-4 intermediate
    const std::vector<int> reduceDims_1    = {4};
    const std::vector<int> invariantDims_1 = {0, 1, 2, 3};

    const std::vector<int> reduceDims_2    = {3};
    const std::vector<int> invariantDims_2 = {0, 1, 2};

    // used by the host reduction, which reduces both dims {3, 4} in a single pass
    const std::vector<int> reduceDims    = {3, 4};
    const std::vector<int> invariantDims = {0, 1, 2};

    const std::vector<size_t> inLengths_1 = {64, 320, 80, 4, 128};

    // input lengths of the second reduction, which are also the output lengths of the first
    // reduction
    const std::vector<size_t> inLengths_2 = {64, 320, 80, 4};

    const std::vector<size_t> outLengths = {64, 320, 80};

    if(argc == 1)
    {
        do_verify   = true;
        init_method = 2;
        time_kernel = true;
    }
    else if(argc == 4)
    {
        do_verify   = static_cast<bool>(atoi(argv[1]));
        init_method = atoi(argv[2]);
        time_kernel = static_cast<bool>(atoi(argv[3]));
    }
    else
    {
        std::ostringstream ostr;

        ostr << "Wrong number of arguments!" << std::endl
             << "Usage: " << argv[0] << " [verify 0/1] [init_method] [time_kernel 0/1]"
             << std::endl;

        throw std::runtime_error(ostr.str());
    };

    alpha = 1.0f;
    beta  = 0.0f;

    Tensor<InOutDataType> in_1(inLengths_1);

    Tensor<InOutDataType> out_ref(outLengths);
    Tensor<InOutDataType> in_2(inLengths_2); // also the output tensor of the first reduction
    Tensor<InOutDataType> out(outLengths);

    auto inStrides_1 = in_1.mDesc.GetStrides();
    auto inStrides_2 = in_2.mDesc.GetStrides();
    auto outStrides  = out.mDesc.GetStrides();

    size_t invariant_total_length = out.mDesc.GetElementSize();
    size_t reduce_total_length    = in_1.mDesc.GetElementSize() / invariant_total_length;

    std::size_t num_thread = 1;

    if(do_verify)
    {
        switch(init_method)
        {
        case 0: break;
        case 1:
            in_1.GenerateTensorValue(GeneratorTensor_1<InOutDataType>{1}, num_thread);
            if(beta != 0.0f)
                out_ref.GenerateTensorValue(GeneratorTensor_1<InOutDataType>{1}, num_thread);
            break;
        case 2:
            in_1.GenerateTensorValue(GeneratorTensor_2<InOutDataType>{-5, 5}, num_thread);
            if(beta != 0.0f)
                out_ref.GenerateTensorValue(GeneratorTensor_2<InOutDataType>{-5, 5}, num_thread);
            break;
        default:
            in_1.GenerateTensorValue(GeneratorTensor_3<InOutDataType>{-5.0, 5.0}, num_thread);
            if(beta != 0.0f)
                out_ref.GenerateTensorValue(GeneratorTensor_3<InOutDataType>{-5.0, 5.0},
                                            num_thread);
        }

        if(beta != 0.0f)
            for(size_t i = 0; i < out_ref.mDesc.GetElementSpaceSize(); i++)
                out.mData[i] = out_ref.mData[i];
    };

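    // device buffers for the rank-5 input, the rank-4 intermediate, and the rank-3 output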
    DeviceMem in_1_dev(sizeof(InOutDataType) * in_1.mDesc.GetElementSpaceSize());
    DeviceMem in_2_dev(sizeof(InOutDataType) * in_2.mDesc.GetElementSpaceSize());
    DeviceMem out_dev(sizeof(InOutDataType) * out.mDesc.GetElementSpaceSize());

    in_1_dev.ToDevice(in_1.mData.data());

    if(beta != 0.0f)
        out_dev.ToDevice(out.mData.data());

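    // construct the elementwise ops for the chosen reduction; the total reduce length is passed
    // because some reductions (e.g. averaging) need it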
    InElementwiseOperation in_elementwise_op;
    AccElementwiseOperation acc_elementwise_op;

    std::tie(in_elementwise_op, acc_elementwise_op) =
        reduce_unary_operator<ReduceOpId, true, true>::GetElementwiseOperator(
            static_cast<int32_t>(reduce_total_length));

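    // reference result: a single host-side reduction over both dims {3, 4} of the original input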
    if(do_verify)
    {
        ReductionHost<InOutDataType,
                      AccDataType,
                      InOutDataType,
                      ReduceOperation,
                      InElementwiseOperation,
                      AccElementwiseOperation,
                      5, // Rank
                      2, // NumReduceDim
                      PropagateNan,
                      OutputIndex>
            hostReduce(in_1.mDesc, out_ref.mDesc, invariantDims, reduceDims);

        hostReduce.Run(alpha,
                       in_1.mData.data(),
                       beta,
                       out_ref.mData.data(),
                       nullptr,
                       in_elementwise_op,
                       acc_elementwise_op);
    };

    std::vector<ck::index_t> i_inLengths_1;
    std::vector<ck::index_t> i_inStrides_1;
    std::vector<ck::index_t> i_inLengths_2;
    std::vector<ck::index_t> i_inStrides_2;
    std::vector<ck::index_t> i_outLengths;
    std::vector<ck::index_t> i_outStrides;

    i_inLengths_1.assign(inLengths_1.begin(), inLengths_1.end());
    i_inStrides_1.assign(inStrides_1.begin(), inStrides_1.end());
    i_inLengths_2.assign(inLengths_2.begin(), inLengths_2.end());
    i_inStrides_2.assign(inStrides_2.begin(), inStrides_2.end());
    i_outLengths.assign(outLengths.begin(), outLengths.end());
    i_outStrides.assign(outStrides.begin(), outStrides.end());

    auto reduce_1 = DeviceReduceInstance_1{};

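    // first device call: reduce dim 4 of the rank-5 input into the rank-4 intermediate buffer;
    // alpha/beta scaling is deferred to the second call, so 1.0f/0.0f are used here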
    auto argument_ptr_1 = reduce_1.MakeArgumentPointer(i_inLengths_1,
                                                       i_inStrides_1,
                                                       i_inLengths_2,
                                                       i_inStrides_2,
                                                       reduceDims_1,
                                                       1.0f,
                                                       0.0f,
                                                       in_1_dev.GetDeviceBuffer(),
                                                       nullptr,
                                                       in_2_dev.GetDeviceBuffer(),
                                                       nullptr,
                                                       in_elementwise_op,
                                                       PassThroughOp{});

    if(!reduce_1.IsSupportedArgument(argument_ptr_1.get()))
    {
        std::cout
            << "The runtime parameters are not supported by the DeviceReduce instance, exiting!"
            << std::endl;

        return 1;
    };

    auto invoker_ptr_1 = reduce_1.MakeInvokerPointer();

    auto reduce_2 = DeviceReduceInstance_2{};

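    // second device call: reduce dim 3 of the rank-4 intermediate into the final output, applying
    // the user-specified alpha/beta and the final elementwise op (sqrt for NORM2)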
    auto argument_ptr_2 = reduce_2.MakeArgumentPointer(i_inLengths_2,
                                                       i_inStrides_2,
                                                       i_outLengths,
                                                       i_outStrides,
                                                       reduceDims_2,
                                                       alpha,
                                                       beta,
                                                       in_2_dev.GetDeviceBuffer(),
                                                       nullptr,
                                                       out_dev.GetDeviceBuffer(),
                                                       nullptr,
                                                       PassThroughOp{},
                                                       acc_elementwise_op);

    if(!reduce_2.IsSupportedArgument(argument_ptr_2.get()))
    {
        std::cout
            << "The runtime parameters are not supported by the DeviceReduce instance, exiting!"
            << std::endl;

        return 1;
    };

    auto invoker_ptr_2 = reduce_2.MakeInvokerPointer();

    float avg_time_1 = invoker_ptr_1->Run(argument_ptr_1.get(), StreamConfig{nullptr, time_kernel});
    float avg_time_2 = invoker_ptr_2->Run(argument_ptr_2.get(), StreamConfig{nullptr, time_kernel});

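    // effective bytes moved: one read of the full input plus one write of the final output
    // (traffic to and from the intermediate tensor is not counted)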
    std::size_t num_bytes = invariant_total_length * reduce_total_length * sizeof(InOutDataType) +
                            invariant_total_length * sizeof(InOutDataType);

    float gb_per_sec = num_bytes / 1.E6 / (avg_time_1 + avg_time_2);

    std::cout << "Perf: " << avg_time_1 + avg_time_2 << " ms, " << gb_per_sec << " GB/s, "
              << reduce_1.GetTypeString() << " => " << reduce_2.GetTypeString() << std::endl;

    bool pass = true;

    if(do_verify)
    {
        out_dev.FromDevice(out.mData.data());
        pass = pass && ck::utils::check_err(out.mData, out_ref.mData);
    };

    return (pass ? 0 : 1);
}