#include <migraphx/quantization.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/op/convert.hpp>
#include <migraphx/op/clip.hpp>
#include <migraphx/op/round.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/mul.hpp>
#include <migraphx/op/add.hpp>
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/op/capture.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/multibroadcast.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/target.hpp>
#include <utility>
#include <set>
#include <iomanip>
#include <fstream>
#include <algorithm>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_INT8_QUANTIZATION_PARAMS)

instruction_ref insert_quant_ins(program& prog,
                                 instruction_ref& ins,
                                 shape::type_t type,
                                 std::unordered_map<instruction_ref, instruction_ref>& map_ins,
                                 float scale = 1.0f,
                                 float shift = 0.0f)
{
    if(map_ins.count(ins) > 0)
    {
        return map_ins[ins];
    }

    if(ins->name() == "undefined")
    {
        return ins;
    }

    assert(ins->get_shape().type() == shape::float_type or
           ins->get_shape().type() == shape::double_type or
           ins->get_shape().type() == shape::int32_type or
           ins->get_shape().type() == shape::half_type);
    instruction_ref quant_ins{};
    auto insert_loc = std::next(ins);
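    // For int8, the pipeline built below computes
    //   q = convert<int8>(clip(round(fp * scale + shift), -128, 127)),
    // i.e. an affine rescale followed by rounding and saturation.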
    if(type == shape::int8_type)
    {
        auto scaled_ins = ins;
        if(scale != 1.0f)
        {
            auto float_ins = scaled_ins;
            if(scaled_ins->get_shape().type() != shape::float_type)
            {
                float_ins =
                    prog.insert_instruction(insert_loc, op::convert{shape::float_type}, scaled_ins);
            }
            std::vector<float> vec_scale(scaled_ins->get_shape().elements(), scale);
            auto l_scale = prog.add_literal(literal(float_ins->get_shape(), vec_scale));
            scaled_ins   = prog.insert_instruction(insert_loc, op::mul{}, l_scale, float_ins);
        }

        auto shifted_ins = scaled_ins;
        if(shift != 0.0f)
        {
            auto float_ins = shifted_ins;
            if(shifted_ins->get_shape().type() != shape::float_type)
            {
                float_ins = prog.insert_instruction(
                    insert_loc, op::convert{shape::float_type}, shifted_ins);
            }
            std::vector<float> vec_shift(shifted_ins->get_shape().elements(), shift);
            auto l_shift = prog.add_literal(literal(float_ins->get_shape(), vec_shift));
            shifted_ins  = prog.insert_instruction(insert_loc, op::add{}, l_shift, float_ins);
        }

        auto rounded_ins = prog.insert_instruction(insert_loc, op::round{}, shifted_ins);
        auto clipped_ins =
            prog.insert_instruction(insert_loc, op::clip{127.0f, -128.0f}, rounded_ins);
        quant_ins = prog.insert_instruction(insert_loc, op::convert{type}, clipped_ins);
    }
    else
    {
        quant_ins = prog.insert_instruction(insert_loc, op::convert{type}, ins);
    }

    map_ins[ins] = quant_ins;

    return quant_ins;
}

// This function converts any instruction whose name is specified in the
// input from double or float to float16 by inserting convert operators
// around it. The conversion can overflow, but that is very rare in deep
// learning workloads, so we simply truncate the input to get the fp16.
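//
// A minimal usage sketch (hypothetical caller code):
//     migraphx::program p = ...;         // program with fp32/fp64 instructions
//     quantize_fp16(p, {"all"});         // convert every instruction
//     quantize_fp16(p, {"dot", "add"});  // or convert only selected operators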
void quantize_fp16(program& prog, const std::vector<std::string>& ins_names)
{
    std::unordered_map<instruction_ref, instruction_ref> map_fp16;
    for(auto ins : iterator_for(prog))
    {
        // the name "all" indicates that every instruction is converted
        if((not contains(ins_names, "all")) and (not contains(ins_names, ins->name())))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();
        // process all inputs; if an input is fp32 or fp64, convert it
        // to fp16 by adding a convert operator
        auto inputs = ins->inputs();
        std::vector<instruction_ref> converted_inputs;
        for(auto input : inputs)
        {
            auto s = input->get_shape();
            if(s.type() == shape::float_type or s.type() == shape::double_type)
            {
                // if the input is a convert from fp16, use the convert's own
                // input to avoid a redundant fp16 -> fp32 -> fp16 round trip
                instruction_ref input_fp16{};
                if(input->name() == "convert" and
                   input->inputs().front()->get_shape().type() == shape::half_type)
                {
                    input_fp16 = input->inputs().front();
                }
                else
                {
                    input_fp16 = insert_quant_ins(prog, input, shape::half_type, map_fp16);
                }
                converted_inputs.push_back(input_fp16);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // inputs are unchanged, so go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        auto op        = ins->get_operator();
        auto ins_shape = compute_shape(op, converted_inputs);
        if(ins_shape.type() != orig_type)
        {
            // handle the dead-code case (no outputs) to avoid an assert
            // in replace_instruction
            bool output_empty = ins->outputs().empty();
            auto ins_orig_type =
                prog.insert_instruction(std::next(ins), op::convert{orig_type}, ins);
            if(!output_empty)
            {
                prog.replace_instruction(ins, ins_orig_type);
            }
        }

        prog.replace_instruction(ins, op, converted_inputs);
    }
}

static void ins_quantize_int8(program& prog,
                              instruction_ref ins,
                              std::vector<instruction_ref>& converted_inputs,
                              const std::vector<std::pair<float, float>>& ins_quant_params)
{
    auto orig_type = ins->get_shape().type();
    auto inputs    = ins->inputs();
    if(ins->name() == "dot")
    {
        auto dot_op     = any_cast<op::dot>(ins->get_operator());
        float new_alpha = dot_op.alpha / (ins_quant_params[0].first * ins_quant_params[1].first);
        float new_beta  = dot_op.beta;
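        // Why alpha is divided by the product of the input scales: each
        // quantized input is approximately scale * fp, so the int32 product
        // dot(qa, qb) carries a factor of scale_a * scale_b. Hence
        //   alpha * dot(a, b) ~= (alpha / (scale_a * scale_b)) * dot(qa, qb)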
        // Additional check on new_alpha and new_beta: rounding to the nearest
        // integer introduces at most 0.5 of absolute error, so when both
        // magnitudes are at least 50 (a tentative threshold) the relative
        // rounding error is at most 1%, and the rounded integers can be used
        // directly as the alpha and beta of the quant_dot
        float threshold = 50.0f;
        if(std::fabs(new_alpha) >= threshold and std::fabs(new_beta) >= threshold)
        {
            int32_t quant_alpha = static_cast<int32_t>(std::round(new_alpha));
            int32_t quant_beta  = static_cast<int32_t>(std::round(new_beta));
            if(shape::int32_type == orig_type)
            {
                prog.replace_instruction(
                    ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
            }
            else
            {
                auto quant_dot = prog.insert_instruction(
                    ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                prog.replace_instruction(ins, op::convert{orig_type}, quant_dot);
            }
        }
        // either alpha or beta cannot be rounded to an integer without a
        // large relative error, so run quant_dot with alpha = 1, beta = 0
        // and apply the real alpha and beta in floating point afterwards
        else
        {
            if(converted_inputs.size() == 3)
            {
                converted_inputs.pop_back();
            }
            auto q_dot   = prog.insert_instruction(ins, op::quant_dot{1, 0}, converted_inputs);
            auto f_dot   = prog.insert_instruction(ins, op::convert{shape::float_type}, q_dot);
            auto c_shape = q_dot->get_shape();
            std::vector<float> vec_alpha(c_shape.elements(), new_alpha);
            auto l_alpha =
                prog.add_literal(literal({shape::float_type, c_shape.lens()}, vec_alpha));

            if(inputs.size() == 3 and dot_op.beta != 0.0f)
            {
                auto alpha_ab = prog.insert_instruction(ins, op::mul{}, l_alpha, f_dot);
                std::vector<float> vec_beta(c_shape.elements(), dot_op.beta);
                auto l_beta =
                    prog.add_literal(literal({shape::float_type, c_shape.lens()}, vec_beta));
                instruction_ref beta_c{};
                if(orig_type != shape::float_type)
                {
                    auto fp32_c =
                        prog.insert_instruction(ins, op::convert{shape::float_type}, inputs.back());
                    beta_c = prog.insert_instruction(ins, op::mul{}, l_beta, fp32_c);
                }
                else
                {
                    beta_c = prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                }

                if(orig_type == shape::float_type)
                {
                    prog.replace_instruction(ins, op::add{}, alpha_ab, beta_c);
                }
                else
                {
                    auto f_res = prog.insert_instruction(ins, op::add{}, alpha_ab, beta_c);
                    prog.replace_instruction(ins, op::convert{orig_type}, f_res);
                }
            }
            else
            {
                if(orig_type == shape::float_type)
                {
                    prog.replace_instruction(ins, op::mul{}, l_alpha, f_dot);
                }
                else
                {
                    auto alpha_ab = prog.insert_instruction(ins, op::mul{}, l_alpha, f_dot);
                    prog.replace_instruction(ins, op::convert{orig_type}, alpha_ab);
                }
            }
        }
    }
    else if(ins->name() == "convolution")
    {
        // Current MIOpen convolution does not support alpha and beta,
        // so we need a separate multiply to adjust the output
        auto conv_op      = any_cast<op::convolution>(ins->get_operator());
        auto padding      = conv_op.padding;
        auto stride       = conv_op.stride;
        auto dilation     = conv_op.dilation;
        auto padding_mode = conv_op.padding_mode;
        auto group        = conv_op.group;
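        // As with dot: the int32 quant_convolution output carries a factor
        // of the two input scales, so it is rescaled below by
        // adjust_factor = round(1 / (scale_data * scale_weights)).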
        auto adjust_factor =
            std::round(1.0f / (ins_quant_params[0].first * ins_quant_params[1].first));

        auto quant_conv = prog.insert_instruction(
            ins,
            op::quant_convolution{padding, stride, dilation, padding_mode, group},
            converted_inputs);
        float threshold = 50.0f;
        std::vector<float> vec_factor(quant_conv->get_shape().elements(), adjust_factor);
        if(quant_conv->get_shape().type() == orig_type and adjust_factor >= threshold)
        {
            auto l_factor = prog.add_literal(
                literal(quant_conv->get_shape(), vec_factor.begin(), vec_factor.end()));
            prog.replace_instruction(ins, op::mul{}, quant_conv, l_factor);
        }
        // convert the quant_conv output to float, multiply by the factor,
        // and convert back to the original type
        else
        {
            auto float_conv =
                prog.insert_instruction(ins, op::convert{shape::float_type}, quant_conv);
            auto l_factor = prog.add_literal(literal(float_conv->get_shape(), vec_factor));
            if(orig_type == shape::float_type)
            {
                prog.replace_instruction(ins, op::mul{}, l_factor, float_conv);
            }
            else
            {
                auto adjusted_conv = prog.insert_instruction(ins, op::mul{}, l_factor, float_conv);
                prog.replace_instruction(ins, op::convert{orig_type}, adjusted_conv);
            }
        }
    }
    else
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: does not support operator " + ins->name());
    }
}

// int8 quantization is different from fp16 since int8 can only represent
// values from -128 to 127. To convert a float or double value to int8, we
// need a scale and a shift, so the conversion is v_int8 = fp * scale + shift.
// To simplify the changes, we treat the shift as 0.0f for now.
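//
// A worked example of the scale chosen by calibration below: if the largest
// observed magnitude of an input is 2.0, then scale = 127 / 2.0 = 63.5, so
// fp = 1.5 becomes round(1.5 * 63.5) = 95, which stays inside the int8 range.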
void quantize_int8_impl(program& prog,
                        const std::vector<std::pair<float, float>>& quant_params,
                        const std::vector<std::string>& ins_names)
{
    if(enabled(MIGRAPHX_INT8_QUANTIZATION_PARAMS{}))
    {
        for(std::size_t i = 0; i < quant_params.size(); ++i)
        {
            auto param = quant_params.at(i);
            std::cout << "ins_index = " << i << ", scale = " << param.first
                      << ", shift = " << param.second << std::endl;
        }
        std::cout << std::endl;
    }

    // For now, we only support the int8 quantization of gemm and convolution
    std::set<std::string> op_names = {"convolution", "dot"};
    std::set<std::string> input_ins_names(ins_names.begin(), ins_names.end());
    if(!std::includes(
           op_names.begin(), op_names.end(), input_ins_names.begin(), input_ins_names.end()))
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: only DOT and CONVOLUTION operations are supported");
    }

    std::size_t quant_param_index = 0;
    std::unordered_map<instruction_ref, instruction_ref> map_quant_ins;
    std::unordered_map<instruction_ref, std::size_t> map_ins_index;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        // the dot operator can have 2 or 3 input arguments;
        // if the 3rd argument is present, convert it to int32
        std::vector<instruction_ref> converted_inputs;

        // process all inputs: if an input is fp32, fp64, fp16, or int32,
        // convert it to int8 by adding a convert operator, then replace
        // the operator with the corresponding int8 version
        auto inputs = ins->inputs();
        std::vector<std::pair<float, float>> ins_quant_params;
        for(auto input : inputs)
        {
            // calculate the index of each instruction to be quantized
            std::size_t ins_index =
                (map_ins_index.count(input) > 0) ? map_ins_index[input] : quant_param_index++;
            map_ins_index[input] = ins_index;

            auto param = quant_params[map_ins_index[input]];
            ins_quant_params.push_back(param);

            // In general, the target_type is int8, but for the dot
            // operation, if it has 3 inputs, then the last one should
            // be converted to int32_type
            shape::type_t quant_type = shape::int8_type;
            if((ins->name() == "dot") and (inputs.size() == 3) and (input == inputs.back()))
            {
                quant_type = shape::int32_type;
            }

            auto s = input->get_shape();
            if((s.type() == shape::float_type or s.type() == shape::double_type or
                s.type() == shape::half_type or s.type() == shape::int32_type) and
               s.type() != quant_type)
            {
                // if the input is already a convert to the quantized type,
                // use the convert's own input directly
                instruction_ref quant_input{};
                if(input->name() == "convert" and
                   input->inputs().front()->get_shape().type() == quant_type)
                {
                    quant_input = input->inputs().front();
                    // the scale in this case is not used, so reset the
                    // scale of this parameter to 1.0f
                    ins_quant_params.back() = std::pair<float, float>(1.0f, 0.0f);
                }
                else
                {
                    quant_input = insert_quant_ins(
                        prog, input, quant_type, map_quant_ins, param.first, param.second);
                }
                converted_inputs.push_back(quant_input);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // inputs are unchanged, so go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        ins_quantize_int8(prog, ins, converted_inputs, ins_quant_params);
    }

    if(quant_param_index != quant_params.size())
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: number of scales does not match");
    }
}

// defined below: runs the capture pass and returns one (scale, shift) pair
// per captured argument
std::shared_ptr<std::vector<std::pair<float, float>>>
capture_arguments_impl(program& prog, const target& t, const std::vector<std::string>& ins_names);

void quantize_int8(program& prog,
                   const target& t,
                   std::vector<program::parameter_map>& calibration,
                   const std::vector<std::string>& ins_names)
{
    // insert capture operator
    auto cap_prog          = prog;
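    // work on a copy: the capture-instrumented copy is compiled and run on
    // the calibration data, while the original prog is quantized afterwards
    // with the scales gathered here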
    auto int8_quant_params = capture_arguments_impl(cap_prog, t, ins_names);

    // use the calibration data to compute the quantization scale
    cap_prog.compile(t);

    // use all calibration data to run the program to calculate the
    // quantization scale and shift
    for(auto&& arg : calibration)
    {
        program::parameter_map m;
        for(auto&& x : cap_prog.get_parameter_shapes())
        {
            if(arg.count(x.first) > 0)
            {
                assert(x.second == arg[x.first].get_shape());
                m[x.first] = t.copy_to(arg[x.first]);
            }
            else
            {
                m[x.first] = t.allocate(x.second);
            }
        }
        cap_prog.eval(m);
    }

    quantize_int8_impl(prog, *int8_quant_params, ins_names);
}
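
// A minimal calibration sketch (hypothetical caller code; the target object
// and the parameter name "x" are assumptions):
//     migraphx::program p = ...;  // program containing dot or convolution
//     std::vector<migraphx::program::parameter_map> calibration(1);
//     calibration[0]["x"] = migraphx::generate_argument(p.get_parameter_shape("x"));
//     quantize_int8(p, t, calibration, {"dot", "convolution"});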

// For each input argument of the instructions to be quantized, we insert
// a capture operator that records the argument so the scale and shift can
// be computed from it
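// The callback func is not invoked here: each op::capture stores its index
// and the function, and calls it on the actual argument values when the
// instrumented program is evaluated on the calibration data.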
std::size_t capture_arguments(program& prog,
                              const std::vector<std::string>& ins_names,
                              const std::function<void(std::size_t, std::vector<argument>)>& func)
{
    size_t num_quant_params = 0;
    // the int8 quantization only supports dot and convolution
    // std::includes requires sorted ranges, so keep the operator names in a
    // set as quantize_int8_impl does
    std::set<std::string> op_names = {"convolution", "dot"};
    std::set<std::string> input_ins_names(ins_names.begin(), ins_names.end());
    if(!std::includes(
           op_names.begin(), op_names.end(), input_ins_names.begin(), input_ins_names.end()))
    {
        MIGRAPHX_THROW("CAPTURE_ARGUMENTS: input operator is not supported");
    }

    std::unordered_map<instruction_ref, instruction_ref> ins_map;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        auto inputs = ins->inputs();
        std::vector<instruction_ref> new_args;
        for(auto input : inputs)
        {
            instruction_ref new_ins{};
            if(ins_map.count(input) > 0)
            {
                new_ins = ins_map[input];
            }
            else
            {
                new_ins = prog.insert_instruction(
                    std::next(input), op::capture{num_quant_params++, func}, input);
                ins_map[input] = new_ins;
            }
            new_args.push_back(new_ins);
        }
        instruction::replace(ins, ins->get_operator(), ins->get_shape(), new_args);
    }

    return num_quant_params;
}

std::shared_ptr<std::vector<std::pair<float, float>>>
capture_arguments_impl(program& prog, const target& t, const std::vector<std::string>& ins_names)
{
    std::shared_ptr<std::vector<std::pair<float, float>>> int8_quant_params =
        std::make_shared<std::vector<std::pair<float, float>>>();
    std::shared_ptr<std::vector<float>> max_abs_vals = std::make_shared<std::vector<float>>();

    auto calc_quant_params = [int8_quant_params, max_abs_vals, &t](std::size_t ins_index,
                                                                   std::vector<argument> args) {
        std::pair<float, float> param_pair{64.0f, 0.0f};

        // scale and shift are needed only for the int8 type, and we do
        // not consider shift, so it stays 0
        std::vector<float> vec_val;
        argument arg = t.copy_from(args.front());
        arg.visit([&](auto output) { vec_val.assign(output.begin(), output.end()); });
        auto max_val                = *std::max_element(vec_val.begin(), vec_val.end());
        auto min_val                = *std::min_element(vec_val.begin(), vec_val.end());
        auto max_abs                = std::max(std::fabs(max_val), std::fabs(min_val));
        max_abs_vals->at(ins_index) = std::max(max_abs_vals->at(ins_index), max_abs);

        // if all values are 0, no need to do scaling
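        // otherwise, 127.0f / max_abs maps the observed range
        // [-max_abs, max_abs] onto the int8 range [-127, 127]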
        if(max_abs_vals->at(ins_index) == 0.0f)
        {
            param_pair.first = 1.0f;
        }
        else
        {
            param_pair.first = 127.0f / max_abs_vals->at(ins_index);
        }
        int8_quant_params->at(ins_index) = param_pair;
    };

    auto num_params = capture_arguments(prog, ins_names, calc_quant_params);
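    // capture_arguments only inserts the capture instructions; the lambda
    // above runs later, when quantize_int8 evaluates the instrumented
    // program, so both vectors must be sized before that evaluation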

    int8_quant_params->resize(num_params, std::pair<float, float>(64.0f, 0.0f));
    max_abs_vals->resize(num_params, 0.0f);

    return int8_quant_params;
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx