#include <migraphx/quantization.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/op/convert.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/mul.hpp>
#include <migraphx/op/add.hpp>
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/op/capture.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/multibroadcast.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
#include <utility>
#include <iomanip>
#include <fstream>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

instruction_ref insert_quant_ins(program& prog,
                                 instruction_ref& ins,
                                 shape::type_t type,
                                 std::unordered_map<instruction_ref, instruction_ref>& map_ins)
{
    if(map_ins.count(ins) > 0)
    {
        return map_ins[ins];
    }

    if(ins->name() == "undefined")
    {
        return ins;
    }

    assert(ins->get_shape().type() == shape::float_type ||
           ins->get_shape().type() == shape::double_type ||
           ins->get_shape().type() == shape::int32_type);
    instruction_ref quant_ins{};
    quant_ins    = prog.insert_instruction(std::next(ins), op::convert{type}, ins);
    map_ins[ins] = quant_ins;

    return quant_ins;
}

// This function converts any instruction specified in the input list
// from double or float to float16 by inserting a convert operator.
// The conversion could overflow, but overflow is very rare in deep
// learning, so we simply truncate the input to get the fp16 value.
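// (For scale: fp16's largest finite value is 65504, so a float value such
// as 1.0e5 would overflow on conversion; magnitudes that large are rare in
// deep learning models.)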
void quantize(program& prog, const std::vector<std::string>& ins_names)
{
    std::unordered_map<instruction_ref, instruction_ref> map_fp16;
    for(auto ins : iterator_for(prog))
    {
        // "all" indicates that every instruction is to be converted
        if((not contains(ins_names, "all")) and (not contains(ins_names, ins->name())))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();
        // process all inputs; if an input is fp32 or fp64, convert it
        // to fp16 by adding a convert operator
        auto inputs = ins->inputs();
        std::vector<instruction_ref> converted_inputs;
        for(auto input : inputs)
        {
            auto s = input->get_shape();
            if(s.type() == shape::float_type || s.type() == shape::double_type)
            {
                // if the input is itself a convert operator, use the
                // convert's input directly
                instruction_ref input_fp16{};
                if(input->name() == "convert")
                {
                    input_fp16 = input->inputs().front();
                }
                else
                {
                    input_fp16 = insert_quant_ins(prog, input, shape::half_type, map_fp16);
                }
                converted_inputs.push_back(input_fp16);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // no change to the inputs, so go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        auto op        = ins->get_operator();
        auto ins_shape = compute_shape(op, converted_inputs);
        if(ins_shape.type() != orig_type)
        {
            // check for the dead-code case (no outputs) to avoid an assert
            bool output_empty = ins->outputs().empty();
            auto ins_orig_type =
                prog.insert_instruction(std::next(ins), op::convert{orig_type}, ins);
            if(!output_empty)
            {
                prog.replace_instruction(ins, ins_orig_type);
            }
        }

        prog.replace_instruction(ins, op, converted_inputs);
    }
}

void quantize(program& prog) { quantize(prog, {"all"}); }
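
// A minimal usage sketch of the two overloads above (the program setup is
// an assumption, not part of this file): given a parsed program p,
//     quantize(p, {"dot", "convolution"});  // convert only dot/conv inputs
//     quantize(p);                          // convert every instruction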

// int8 quantization is different from fp16 since int8 can only hold values
// in the range -128 ~ 127. To convert a float or double value to int8, we
// need a scale and a shift; the conversion is then v_int8 = fp * scale + shift.
// To simplify the changes, we treat the shift as 0.0f for now.
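// As a worked example (numbers are illustrative only): if the largest
// absolute value captured for a tensor is 2.0, the scale computed during
// capture is 127.0f / 2.0f = 63.5f, so fp = 0.5 maps to
// v_int8 = 0.5 * 63.5 + 0.0 = 31.75 (about 32), and fp = -2.0 maps to -127.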
void quantize_int8(program& prog,
                   const std::vector<std::string>& ins_names,
                   const std::vector<std::pair<float, float>>& quant_params)
{
    for(size_t i = 0; i < quant_params.size(); i++)
    {
        auto param = quant_params.at(i);
        std::cout << "index = " << i << ", scale = " << param.first
                  << ", shift = " << param.second << std::endl;
    }
    std::cout << std::endl;

    // For now, we only support the int8 quantization of gemm and convolution
    std::vector<std::string> op_names = {"dot", "convolution"};
    if(!std::all_of(ins_names.begin(), ins_names.end(), [&](auto name) {
           return (std::find(op_names.begin(), op_names.end(), name) != op_names.end());
       }))
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: only supports DOT and CONVOLUTION operations");
    }

    std::size_t quant_param_index = 0;
    std::unordered_map<instruction_ref, instruction_ref> map_quant_ins;
    std::unordered_map<instruction_ref, std::size_t> map_index;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();

        // for the dot operator, there could be 2 or 3 input arguments
        // if the 3rd argument is available, convert it to an int32.
        std::vector<instruction_ref> converted_inputs;

        // process all inputs: if an input is fp32 or fp64, convert it
        // to int8 by adding a convert operator, then replace the
        // operator with its corresponding int8 version
        auto inputs = ins->inputs();
        std::vector<std::pair<float, float>> ins_quant_params;
        for(auto input : inputs)
        {
            // calculate the index of each instruction to be quantized
            if(map_index.count(input) == 0)
            {
                map_index[input] = quant_param_index++;
            }
            auto param = quant_params[map_index[input]];
            ins_quant_params.push_back(param);

            // In general, the target_type is int8, but for the dot
            // operation, if it has 3 inputs, then the last one should
            // be converted to int32_type
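            // (an int8 x int8 quant_dot accumulates its products in int32,
            // so the C operand being added must itself be int32)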
            shape::type_t quant_type = shape::int8_type;
            if(ins->name() == "dot" and inputs.size() == 3 and input == inputs.back())
            {
                quant_type = shape::int32_type;
            }

            auto s = input->get_shape();
            if((s.type() == shape::float_type || s.type() == shape::double_type ||
                s.type() == shape::int32_type) &&
               s.type() != quant_type)
            {
                // if the input is a convert operator whose own input already
                // has the quantized type, use that input directly
                instruction_ref quant_input{};
                if(input->name() == "convert")
                {
                    auto tmp_ins = input->inputs().front();
                    if(tmp_ins->get_shape().type() == quant_type)
                    {
                        quant_input = input->inputs().front();
                    }
                    else
                    {
                        quant_input = insert_quant_ins(prog, input, quant_type, map_quant_ins);
                    }
                }
                else
                {
                    quant_input = insert_quant_ins(prog, input, quant_type, map_quant_ins);
                }
                converted_inputs.push_back(quant_input);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // no change for the input, go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        // When converting from other types to int8_type, a scale (and a
        // shift, currently 0.0f) is applied, so the raw quantized results
        // differ from the original ones. To make the output approximately
        // equal to the original, additional calculation is needed to adjust it
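        // A sketch of the adjustment math, assuming each convert scales its
        // input by scale_i: quant_dot(A * scale_a, B * scale_b) yields
        // (A x B) * scale_a * scale_b, so recovering alpha * (A x B) means
        // rescaling the result by alpha / (scale_a * scale_b), which is
        // exactly the new_alpha computed below.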
        if(ins->name() == "dot")
        {
            auto dot_op = any_cast<op::dot>(ins->get_operator());
            float new_alpha =
                dot_op.alpha / (ins_quant_params[0].first * ins_quant_params[1].first);
            float new_beta = dot_op.beta;
            // Check the magnitudes of new_alpha and new_beta. If both are at
            // least 50 (a provisional threshold), casting them to integers
            // introduces only a small relative rounding error, so they can be
            // passed to quant_dot directly as its integer alpha and beta
            float threshold = 50.0f;
            if(fabs(new_alpha) >= threshold && fabs(new_beta) >= threshold)
            {
                int32_t quant_alpha = static_cast<int32_t>(new_alpha);
                int32_t quant_beta  = static_cast<int32_t>(new_beta);
                shape quant_shape   = compute_shape(op::quant_dot{1, 0}, converted_inputs);
                if(quant_shape.type() == orig_type)
                {
                    prog.replace_instruction(
                        ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                }
                else
                {
                    auto quant_dot = prog.insert_instruction(
                        ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                    prog.replace_instruction(ins, op::convert{orig_type}, quant_dot);
                }
            }
            // otherwise alpha or beta cannot be cast to an integer without
            // too large a relative rounding error
            else
            {
                auto q_dot = prog.insert_instruction(ins, op::quant_dot{1, 0}, converted_inputs);
                if(inputs.size() == 3 and dot_op.beta != 0.0f)
                {
                    auto alpha_ab = prog.insert_instruction(
                        ins, op::convert{orig_type, new_alpha, 0.0f}, q_dot);
                    auto c_shape = q_dot->get_shape();
                    std::vector<float> vec_beta(c_shape.elements(), dot_op.beta);
                    auto l_beta =
                        prog.add_literal(literal({shape::float_type, c_shape.lens()}, vec_beta));
                    instruction_ref beta_c{};
                    if(orig_type != shape::float_type)
                    {
                        auto fp32_c = prog.insert_instruction(
                            ins, op::convert{shape::float_type}, inputs.back());
                        auto fp32_beta_c = prog.insert_instruction(ins, op::mul{}, l_beta, fp32_c);
                        beta_c = prog.insert_instruction(ins, op::convert{orig_type}, fp32_beta_c);
                    }
                    else
                    {
                        beta_c = prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                    }
                    prog.replace_instruction(ins, op::add{}, alpha_ab, beta_c);
                }
                else
                {
                    prog.replace_instruction(ins, op::convert{orig_type, new_alpha, 0.0f}, q_dot);
                }
            }
        }
        else if(ins->name() == "convolution")
        {
            // Current MIOpen convolution does not support alpha and beta,
            // so we need a separate multiply to adjust the output
            auto conv_op       = any_cast<op::convolution>(ins->get_operator());
            auto padding       = conv_op.padding;
            auto stride        = conv_op.stride;
            auto dilation      = conv_op.dilation;
            auto padding_mode  = conv_op.padding_mode;
            auto group         = conv_op.group;
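            // same rescaling idea as the dot case above: the int8 result
            // carries a factor of scale_a * scale_b that must be divided out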
            auto adjust_factor = 1.0f / (ins_quant_params[0].first * ins_quant_params[1].first);

            auto quant_conv = prog.insert_instruction(
                ins,
                op::quant_convolution{padding, stride, dilation, padding_mode, group},
                converted_inputs);
            prog.replace_instruction(ins, op::convert{orig_type, adjust_factor, 0.0f}, quant_conv);
        }
        else
        {
            MIGRAPHX_THROW("QUANTIZE_INT8: does not support operator " + ins->name());
        }
    }

    if(quant_param_index != quant_params.size())
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: number of scales does not match");
    }
}

void quantize_int8(program& prog, const std::vector<std::string>& ins_names)
{
    quantize_int8(prog, ins_names, *prog.int8_quant_params);
}

void quantize_int8(program& prog)
{
    std::vector<std::string> ins_names = {"dot", "convolution"};
    quantize_int8(prog, ins_names);
}

// For each input argument of the instructions to be quantized, we need to
// insert a capture operator to compute the scale and shift
void capture_arguments(program& prog,
                       const std::vector<std::string>& ins_names,
                       const std::function<void(std::size_t, std::vector<argument>)>& func)
{
    size_t num_quant_params = 0;
    // int8 quantization only supports dot and convolution (and their quant_ variants)
    std::vector<std::string> op_names = {"dot", "convolution", "quant_dot", "quant_convolution"};
    if(!std::all_of(ins_names.begin(), ins_names.end(), [&](auto name) {
           return std::find(op_names.begin(), op_names.end(), name) != op_names.end();
       }))
    {
        MIGRAPHX_THROW("CAPTURE_ARGUMENTS: input operator is not supported");
    }

    std::unordered_map<instruction_ref, instruction_ref> ins_map;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        auto inputs = ins->inputs();
        std::vector<instruction_ref> new_args;
        for(auto input : inputs)
        {
            instruction_ref new_ins{};
            if(ins_map.count(input) > 0)
            {
                new_ins = ins_map[input];
            }
            else
            {
                new_ins = prog.insert_instruction(
                    std::next(input), op::capture{num_quant_params++, func}, input);
                ins_map[input] = new_ins;
            }
            new_args.push_back(new_ins);
        }
        instruction::replace(ins, ins->get_operator(), ins->get_shape(), new_args);
    }

    // set one pair of parameters for each captured argument
    prog.int8_quant_params->resize(num_quant_params, std::make_pair(-1.0f, -1.0f));
}

void capture_arguments(program& prog, const std::vector<std::string>& ins_names)
{
    auto calc_quant_params = [&](std::size_t ins_index, std::vector<migraphx::argument> args) {
        std::pair<float, float> param_pair{1.0f, 0.0f};

        // scale and shift are needed only for the int8 type; we do not
        // consider shift yet, so it stays 0.0f
        std::vector<float> vec_val;
        args.front().visit([&](auto output) { vec_val.assign(output.begin(), output.end()); });
        auto max_val = *std::max_element(vec_val.begin(), vec_val.end());
        auto min_val = *std::min_element(vec_val.begin(), vec_val.end());
        auto max_abs = std::max(std::fabs(max_val), std::fabs(min_val));

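        // choose the scale so the largest-magnitude captured value lands on
        // the edge of the int8 range: max_abs * (127.0f / max_abs) = 127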
        param_pair.first                     = 127.0f / max_abs;
        (*prog.int8_quant_params)[ins_index] = param_pair;
    };

    capture_arguments(prog, ins_names, calc_quant_params);
}

void capture_arguments(program& prog)
{
    std::vector<std::string> ins_names = {"dot", "convolution"};
    capture_arguments(prog, ins_names);
}
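
// A hypothetical end-to-end calibration flow built from the functions above
// (the compile/eval steps are assumptions about the caller, not this file):
//
//     migraphx::program p = ...;  // parse or build a model
//     capture_arguments(p);       // insert capture ops, allocate the params
//     // compile and run p on representative inputs; each capture op then
//     // records 127 / max_abs for its argument into int8_quant_params
//     quantize_int8(p);           // rewrite dot/convolution into int8 ops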

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx