quantization.cpp 16.4 KB
Newer Older
Shucai Xiao's avatar
Shucai Xiao committed
1
#include <migraphx/quantization.hpp>
2
3
4
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
5
#include <migraphx/op/convert.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
6
#include <migraphx/op/dot.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
7
#include <migraphx/op/mul.hpp>
8
#include <migraphx/op/add.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
9
10
11
12
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/multibroadcast.hpp>
13
#include <migraphx/stringutils.hpp>
14
#include <migraphx/ranges.hpp>
15
#include <utility>
16
17
#include <iomanip>
#include <fstream>
18
19
20
21

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

Shucai Xiao's avatar
Shucai Xiao committed
22
// Insert a convert instruction right after `ins` that quantizes its output
// to `type` using v_out = v_in * scale + shift. Results are memoized in
// map_ins so each source instruction is converted at most once.
instruction_ref insert_quant_ins(program& prog,
                                 instruction_ref& ins,
                                 shape::type_t type,
                                 std::unordered_map<instruction_ref, instruction_ref>& map_ins,
                                 float scale = 1.0f,
                                 float shift = 0.0f)
{
    // Reuse a previously inserted convert for this instruction, if any.
    auto cached = map_ins.find(ins);
    if(cached != map_ins.end())
    {
        return cached->second;
    }

    // Undefined instructions are passed through untouched.
    if(ins->name() == "undefined")
    {
        return ins;
    }

    // A negative scale is not a valid quantization parameter.
    if(scale < 0.0f)
    {
        MIGRAPHX_THROW("INSERT_QUANT_INS: scale less than 0");
    }

    // Only these source types are expected by the quantization passes.
    assert(ins->get_shape().type() == shape::float_type ||
           ins->get_shape().type() == shape::double_type ||
           ins->get_shape().type() == shape::int32_type);

    auto converted =
        prog.insert_instruction(std::next(ins), op::convert{type, scale, shift}, ins);
    map_ins[ins] = converted;
    return converted;
}

Shucai Xiao's avatar
Shucai Xiao committed
54
55
56
// This function is to convert any instructions specified in the input
// from double or float to float16 by inserting a convert operator.
// For the conversion, there could be cases of overflowing, but it
Shucai Xiao's avatar
Shucai Xiao committed
57
// is very rare in the area of deeping learning, so we just do a
Shucai Xiao's avatar
Shucai Xiao committed
58
// truncate of the input to get the fp16.
59
void quantize(program& prog, const std::vector<std::string>& ins_names)
60
{
61
    std::unordered_map<instruction_ref, instruction_ref> map_fp16;
Shucai Xiao's avatar
Shucai Xiao committed
62
    for(auto ins : iterator_for(prog))
63
    {
64
        // all indicates every instruction is converted
Shucai Xiao's avatar
Shucai Xiao committed
65
        if((not contains(ins_names, "all")) and (not contains(ins_names, ins->name())))
66
67
68
        {
            continue;
        }
69

70
        shape::type_t orig_type = ins->get_shape().type();
Shucai Xiao's avatar
Shucai Xiao committed
71
        // process all inputs, if input is a fp32 or fp64, convert it
72
        // to a fp16 by adding a convert operator.
73
        auto inputs = ins->inputs();
74
        std::vector<instruction_ref> converted_inputs;
Shucai Xiao's avatar
Shucai Xiao committed
75
        for(auto input : inputs)
76
77
        {
            auto s = input->get_shape();
Shucai Xiao's avatar
Shucai Xiao committed
78
            if(s.type() == shape::float_type || s.type() == shape::double_type)
79
            {
80
                // if the input is a convert operator, uses its input
81
82
                // as its current input
                instruction_ref input_fp16{};
83
                if(input->name() == "convert")
84
85
86
87
88
                {
                    input_fp16 = input->inputs().front();
                }
                else
                {
Shucai Xiao's avatar
Shucai Xiao committed
89
                    input_fp16 = insert_quant_ins(prog, input, shape::half_type, map_fp16);
90
                }
91
                converted_inputs.push_back(input_fp16);
92
            }
93
94
95
96
97
98
            else
            {
                converted_inputs.push_back(input);
            }
        }

99
        // no change for the input, go to the next instruction
Shucai Xiao's avatar
Shucai Xiao committed
100
        if(inputs == converted_inputs)
101
        {
102
            continue;
Shucai Xiao's avatar
Shucai Xiao committed
103
104
105
106
107
108
        }

        auto op        = ins->get_operator();
        auto ins_shape = compute_shape(op, converted_inputs);
        if(ins_shape.type() != orig_type)
        {
Shucai Xiao's avatar
Shucai Xiao committed
109
110
111
112
113
            // check the dead code case to avoid assert
            bool output_empty = ins->outputs().empty();
            auto ins_orig_type =
                prog.insert_instruction(std::next(ins), op::convert{orig_type}, ins);
            if(!output_empty)
114
            {
Shucai Xiao's avatar
Shucai Xiao committed
115
                prog.replace_instruction(ins, ins_orig_type);
Shucai Xiao's avatar
Shucai Xiao committed
116
            }
Shucai Xiao's avatar
Shucai Xiao committed
117
118
119
120
121
122
123
124
        }

        prog.replace_instruction(ins, op, converted_inputs);
    }
}

// Convenience overload: quantize every supported instruction to fp16.
void quantize(program& prog)
{
    quantize(prog, {"all"});
}

125
126
// Default (scale, shift) pairs used by the quantize_int8 overloads that take
// no explicit parameters. NOTE(review): this vector is never populated in
// this file, so the no-parameter overloads will throw the "number of scales
// does not match" error for any program containing quantizable ops — confirm
// whether callers are expected to fill it in elsewhere.
static std::vector<std::pair<float, float>> int8_quant_params;

Shucai Xiao's avatar
Shucai Xiao committed
127
// int8 quantization is different from fp16 since int8 can only handle value
// -128 ~ 127. To convert the float or double to int8, we need a scale and
// a shift, then the convert can be done as v_int8 = fp * scale + shift.
// To simplify the changes, we consider shift as 0.0f for now.
//
// @param prog          program rewritten in place
// @param ins_names     operator names to quantize; only "dot" and
//                      "convolution" are accepted, anything else throws
// @param quant_params  one (scale, shift) pair per distinct quantized input,
//                      consumed in the order inputs are first encountered;
//                      the total consumed must equal quant_params.size() or
//                      this throws at the end
void quantize_int8(program& prog,
                   const std::vector<std::string>& ins_names,
                   const std::vector<std::pair<float, float>>& quant_params)
{
    // NOTE(review): leftover debug printout of the quantization parameters —
    // consider removing or routing through a proper logging facility.
    for(size_t i = 0; i < quant_params.size(); i++)
    {
        auto param = quant_params.at(i);
        std::cout << "index = " << i << ", scale = " << param.first << "\t" << param.second
                  << std::endl;
    }
    std::cout << std::endl;

    // For now, we only support the int8 quantization of gemm and convolution
    std::vector<std::string> op_names = {"dot", "convolution"};
    if(!std::all_of(ins_names.begin(), ins_names.end(), [&](auto name) {
           return (std::find(op_names.begin(), op_names.end(), name) != op_names.end());
       }))
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: only support DOT and CONVOLUTION operation");
    }

    // quant_param_index walks through quant_params as new inputs are seen;
    // map_index remembers which parameter slot each input was assigned.
    std::size_t quant_param_index = 0;
    std::unordered_map<instruction_ref, instruction_ref> map_quant_ins;
    std::unordered_map<instruction_ref, std::size_t> map_index;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();

        // for the dot operator, there could be 2 or 3 input arguments
        // if the 3rd argument is available, convert it to an int32.
        std::vector<instruction_ref> converted_inputs;

        // process all inputs, if input is a fp32 or fp64, convert it
        // to a int8 type by adding a convert operator and replace
        // the operator with the corresponding int8 version
        auto inputs = ins->inputs();
        std::vector<std::pair<float, float>> ins_quant_params;
        for(auto input : inputs)
        {
            // calculate the index of each instruction to be quantized;
            // each distinct input consumes the next unused parameter slot
            if(map_index.count(input) == 0)
            {
                map_index[input] = quant_param_index++;
            }
            auto param = quant_params[map_index[input]];
            ins_quant_params.push_back(param);

            // In general, the target_type is int8, but for the dot
            // operation, if it has 3 inputs, then the last one should
            // be converted to int32_type
            shape::type_t quant_type = shape::int8_type;
            if(ins->name() == "dot" and inputs.size() == 3 and input == inputs.back())
            {
                quant_type = shape::int32_type;
            }

            auto s = input->get_shape();
            if((s.type() == shape::float_type || s.type() == shape::double_type ||
                s.type() == shape::int32_type) &&
               s.type() != quant_type)
            {
                // if the input is a convert operator, uses its input
                // as its current input
                instruction_ref quant_input{};
                if(input->name() == "convert")
                {
                    auto tmp_ins = input->inputs().front();
                    // only bypass the convert when its source already has
                    // the target quantized type
                    if(tmp_ins->get_shape().type() == quant_type)
                    {
                        quant_input = input->inputs().front();
                    }
                    else
                    {
                        quant_input = insert_quant_ins(
                            prog, input, quant_type, map_quant_ins, param.first, param.second);
                    }
                }
                else
                {
                    quant_input = insert_quant_ins(
                        prog, input, quant_type, map_quant_ins, param.first, param.second);
                }
                converted_inputs.push_back(quant_input);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // no change for the input, go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        // When converting from other types to int8_type, there are parameters
        // used as scale and shift(.0f), which will generate results different from
        // the original results. To adjust the output to be "correct(approximately
        // equal)", we need additional calculation for the adjustment
        if(ins->name() == "dot")
        {
            auto dot_op = any_cast<op::dot>(ins->get_operator());
            // fold the input scales into alpha so the quant_dot result is
            // rescaled back toward the unquantized magnitude
            float new_alpha =
                dot_op.alpha / (ins_quant_params[0].first * ins_quant_params[1].first);
            float new_beta = dot_op.beta;
            // We need additional checking about the quant_alpha value. If
            // abs(quant_alpha) > 50 (some tmp value set here), we can convert
            // it to an integer as the new_alpha in the quant_dot
            float threshold = 50.0f;
            if(fabs(new_alpha) >= threshold && fabs(new_beta) >= threshold)
            {
                // Both alpha and beta are large enough that plain integer
                // truncation introduces acceptable relative error.
                // NOTE(review): this branch truncates toward zero while the
                // next branch rounds to nearest — confirm the asymmetry is
                // intentional.
                int32_t quant_alpha = static_cast<int32_t>(new_alpha);
                int32_t quant_beta  = static_cast<int32_t>(new_beta);
                shape quant_shape   = compute_shape(op::quant_dot{1, 0}, converted_inputs);
                if(quant_shape.type() == orig_type)
                {
                    prog.replace_instruction(
                        ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                }
                else
                {
                    // quant_dot output type differs; convert back afterwards
                    auto quant_dot = prog.insert_instruction(
                        ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                    prog.replace_instruction(ins, op::convert{orig_type}, quant_dot);
                }
            }
            // only alpha can be quantized, quantization of beta will cause
            // big error, so we have to manually do the multiplication and
            // addition
            else if(fabs(new_alpha) >= threshold)
            {
                // truncate to the nearest integer
                new_alpha           = new_alpha > 0.0 ? new_alpha + 0.5 : new_alpha - 0.5;
                int32_t quant_alpha = static_cast<int32_t>(new_alpha);
                int32_t quant_beta  = 0;
                if(orig_type == shape::int32_type)
                {
                    // int32 output: quant_dot already yields the right type
                    if(inputs.size() == 2 or dot_op.beta == 0.0f)
                    {
                        prog.replace_instruction(
                            ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                    }
                    // if there are 3 inputs, we need to consider the third argument
                    else
                    {
                        // compute alpha*A*B with quant_dot, then add beta*C
                        // explicitly with mul + add
                        auto q_dot = prog.insert_instruction(
                            ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                        std::vector<float> vec_beta(q_dot->get_shape().elements(), dot_op.beta);
                        auto l_beta = prog.add_literal(literal{orig_type, vec_beta});
                        auto beta_c =
                            prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                        prog.replace_instruction(ins, op::add{}, q_dot, beta_c);
                    }
                }
                else
                {
                    if(inputs.size() == 2 or dot_op.beta == 0.0f)
                    {
                        // convert the int32 quant_dot result back to the
                        // original floating-point type
                        auto q_dot = prog.insert_instruction(
                            ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                        prog.replace_instruction(ins, op::convert{orig_type}, q_dot);
                    }
                    // if there are 3 inputs, we need to consider the third argument
                    else
                    {
                        auto q_dot = prog.insert_instruction(
                            ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                        auto oq_dot = prog.insert_instruction(ins, op::convert{orig_type}, q_dot);
                        std::vector<float> vec_beta(q_dot->get_shape().elements(), dot_op.beta);
                        auto l_beta = prog.add_literal(literal{oq_dot->get_shape(), vec_beta});
                        auto beta_c =
                            prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                        prog.replace_instruction(ins, op::add{}, oq_dot, beta_c);
                    }
                }
            }
            else
            {
                // alpha too small to quantize as an integer: run quant_dot
                // with alpha=1, beta=0 and apply alpha (and beta) as
                // explicit elementwise multiplies afterwards
                auto q_dot = prog.insert_instruction(ins, op::quant_dot{1, 0}, converted_inputs);
                std::vector<float> vec_alpha(q_dot->get_shape().elements(), new_alpha);
                if(orig_type == shape::int32_type)
                {
                    auto l_alpha = prog.add_literal(literal(ins->get_shape(), vec_alpha));
                    if(converted_inputs.size() == 2 or dot_op.beta == 0.0f)
                    {
                        prog.replace_instruction(ins, op::mul{}, l_alpha, q_dot);
                    }
                    // case of 3 arguments
                    else
                    {
                        std::vector<float> vec_beta(ins->get_shape().elements(), new_beta);
                        auto l_beta   = prog.add_literal(literal(ins->get_shape(), vec_beta));
                        auto alpha_ab = prog.insert_instruction(ins, op::mul{}, l_alpha, q_dot);
                        auto beta_c =
                            prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                        prog.replace_instruction(ins, op::add{}, alpha_ab, beta_c);
                    }
                }
                else
                {
                    // non-int32 output: convert the quant_dot result first,
                    // then scale in the original type
                    auto oq_dot  = prog.insert_instruction(ins, op::convert{orig_type}, q_dot);
                    auto l_alpha = prog.add_literal(literal(ins->get_shape(), vec_alpha));
                    if(converted_inputs.size() == 2 or dot_op.beta == 0.0f)
                    {
                        prog.replace_instruction(ins, op::mul{}, l_alpha, oq_dot);
                    }
                    // case of 3 arguments
                    else
                    {
                        std::vector<float> vec_beta(ins->get_shape().elements(), new_beta);
                        auto l_beta   = prog.add_literal(literal(ins->get_shape(), vec_beta));
                        auto alpha_ab = prog.insert_instruction(ins, op::mul{}, l_alpha, oq_dot);
                        auto beta_c =
                            prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                        prog.replace_instruction(ins, op::add{}, alpha_ab, beta_c);
                    }
                }
            }
        }
        else if(ins->name() == "convolution")
        {
            // Current MIOpen convolution does not support alpha and beta,
            // so we need a separate multiply to adjust the output
            auto conv_op       = any_cast<op::convolution>(ins->get_operator());
            auto padding       = conv_op.padding;
            auto stride        = conv_op.stride;
            auto dilation      = conv_op.dilation;
            auto padding_mode  = conv_op.padding_mode;
            auto group         = conv_op.group;
            // undo the two input scales applied during quantization
            auto adjust_factor = 1.0f / (ins_quant_params[0].first * ins_quant_params[1].first);

            auto quant_conv = prog.insert_instruction(
                ins,
                op::quant_convolution{padding, stride, dilation, padding_mode, group},
                converted_inputs);
            // rescale to float, then convert to the original output type
            auto fp_conv = prog.insert_instruction(ins, op::convert{shape::float_type, adjust_factor, 0.0f}, quant_conv);
            prog.replace_instruction(ins, op::convert{orig_type, 1.0f, 0.0f}, fp_conv);
        }
        else
        {
            // NOTE(review): missing space after "operator" in this message
            // (renders as e.g. "operatordot")
            MIGRAPHX_THROW("QUANTIZE_INT8: does not support operator" + ins->name());
        }
    }

    // every supplied (scale, shift) pair must have been consumed exactly
    if(quant_param_index != quant_params.size())
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: number of scales does not match");
    }
}

// Overload using the file-level default quantization parameters.
void quantize_int8(program& prog, const std::vector<std::string>& ins_names)
{
    quantize_int8(prog, ins_names, int8_quant_params);
}

// Overload quantizing the default set of supported operators.
void quantize_int8(program& prog)
{
    // Only gemm (dot) and convolution have int8 implementations.
    quantize_int8(prog, {"dot", "convolution"});
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx