"...model/git@developer.sourcefind.cn:OpenDAS/megatron-lm.git" did not exist on "3b29f0e79d48a531a93c769f0a928bf13847e853"
quantization.cpp 21.1 KB
Newer Older
Shucai Xiao's avatar
Shucai Xiao committed
1
#include <migraphx/quantization.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/op/convert.hpp>
#include <migraphx/op/clip.hpp>
#include <migraphx/op/round.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/mul.hpp>
#include <migraphx/op/add.hpp>
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/op/capture.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/multibroadcast.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/target.hpp>
#include <utility>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <iomanip>
#include <fstream>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

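// Insert a conversion of ins to the given type right after ins, reusing a
// previously inserted conversion when one is recorded in map_ins. For the
// int8 case, the value is first scaled and shifted (v * scale + shift),
// rounded, and clipped to [-128, 127] before the final convert.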
instruction_ref insert_quant_ins(program& prog,
                                 instruction_ref& ins,
                                 shape::type_t type,
                                 std::unordered_map<instruction_ref, instruction_ref>& map_ins,
                                 float scale = 1.0f,
                                 float shift = 0.0f)
{
    if(map_ins.count(ins) > 0)
    {
        return map_ins[ins];
    }

    if(ins->name() == "undefined")
    {
        return ins;
    }

    assert(ins->get_shape().type() == shape::float_type or
           ins->get_shape().type() == shape::double_type or
           ins->get_shape().type() == shape::int32_type);
    instruction_ref quant_ins{};
    auto insert_loc = std::next(ins);
    if(type == shape::int8_type)
    {
        auto scaled_ins = ins;
        if(scale != 1.0f)
        {
            auto float_ins = scaled_ins;
            if(scaled_ins->get_shape().type() != shape::float_type)
            {
                float_ins =
                    prog.insert_instruction(insert_loc, op::convert{shape::float_type}, scaled_ins);
            }
            std::vector<float> vec_scale(scaled_ins->get_shape().elements(), scale);
            auto l_scale = prog.add_literal(literal(scaled_ins->get_shape(), vec_scale));
            scaled_ins   = prog.insert_instruction(insert_loc, op::mul{}, l_scale, float_ins);
        }

        auto shifted_ins = scaled_ins;
        if(shift != 0.0f)
        {
            auto float_ins = shifted_ins;
            if(shifted_ins->get_shape().type() != shape::float_type)
            {
                float_ins = prog.insert_instruction(
                    insert_loc, op::convert{shape::float_type}, shifted_ins);
            }
            std::vector<float> vec_shift(shifted_ins->get_shape().elements(), shift);
            auto l_shift = prog.add_literal(literal(shifted_ins->get_shape(), vec_shift));
            shifted_ins  = prog.insert_instruction(insert_loc, op::add{}, l_shift, float_ins);
        }

        auto rounded_ins = prog.insert_instruction(insert_loc, op::round{}, shifted_ins);
        auto clipped_ins =
            prog.insert_instruction(insert_loc, op::clip{127.0f, -128.0f}, rounded_ins);
        quant_ins = prog.insert_instruction(insert_loc, op::convert{type}, clipped_ins);
    }
    else
    {
        quant_ins = prog.insert_instruction(insert_loc, op::convert{type}, ins);
    }

    map_ins[ins] = quant_ins;

    return quant_ins;
}

// This function converts instructions specified in the input list
// from double or float to float16 by inserting a convert operator.
// The conversion can overflow, but that is very rare in deep learning
// workloads, so we simply truncate the input to get the fp16 value.
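//
// A hypothetical usage sketch (assumes a program `p` built elsewhere,
// e.g. by an onnx parser):
//
//   migraphx::program p = ...;
//   quantize(p, {"dot", "convolution"}); // convert only these operators
//   quantize(p);                         // or convert every instruction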
void quantize(program& prog, const std::vector<std::string>& ins_names)
{
    std::unordered_map<instruction_ref, instruction_ref> map_fp16;
    for(auto ins : iterator_for(prog))
    {
        // "all" indicates every instruction is converted
        if((not contains(ins_names, "all")) and (not contains(ins_names, ins->name())))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();
        // process all inputs; if an input is fp32 or fp64, convert it
        // to fp16 by adding a convert operator.
        auto inputs = ins->inputs();
        std::vector<instruction_ref> converted_inputs;
        for(auto input : inputs)
        {
            auto s = input->get_shape();
            if(s.type() == shape::float_type || s.type() == shape::double_type)
            {
                // if the input is a convert from fp16, use the
                // convert's input directly
                instruction_ref input_fp16{};
                if(input->name() == "convert" and
                   input->inputs().front()->get_shape().type() == shape::half_type)
                {
                    input_fp16 = input->inputs().front();
                }
                else
                {
                    input_fp16 = insert_quant_ins(prog, input, shape::half_type, map_fp16);
                }
                converted_inputs.push_back(input_fp16);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // none of the inputs changed, go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        auto op        = ins->get_operator();
        auto ins_shape = compute_shape(op, converted_inputs);
        if(ins_shape.type() != orig_type)
        {
            // check the dead code case to avoid assert
            bool output_empty = ins->outputs().empty();
            auto ins_orig_type =
                prog.insert_instruction(std::next(ins), op::convert{orig_type}, ins);
            if(!output_empty)
            {
                prog.replace_instruction(ins, ins_orig_type);
            }
        }

        prog.replace_instruction(ins, op, converted_inputs);
    }
}

void quantize(program& prog) { quantize(prog, {"all"}); }

// int8 quantization differs from fp16 since int8 can only represent values
// in the range -128 ~ 127. To convert a float or double to int8, we need a
// scale and a shift; the conversion is then v_int8 = fp * scale + shift.
// To simplify the changes, we treat shift as 0.0f for now.
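//
// Concretely, with scale s and shift z (z == 0.0f here), insert_quant_ins
// realizes the conversion as:
//
//   v_int8 = convert<int8>(clip(round(fp * s + z), -128, 127))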
void quantize_int8(program& prog,
                   const std::vector<std::string>& ins_names,
                   const std::vector<std::pair<float, float>>& quant_params)
{
    for(size_t i = 0; i < quant_params.size(); i++)
    {
        auto param = quant_params.at(i);
        std::cout << "index = " << i << ", scale = " << param.first
                  << ", shift = " << param.second << std::endl;
    }
    std::cout << std::endl;

    // For now, we only support the int8 quantization of gemm and convolution
    std::vector<std::string> op_names = {"dot", "convolution"};
    if(!std::all_of(ins_names.begin(), ins_names.end(), [&](auto name) {
           return (std::find(op_names.begin(), op_names.end(), name) != op_names.end());
       }))
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: only support DOT and CONVOLUTION operation");
    }

    std::size_t quant_param_index = 0;
    std::unordered_map<instruction_ref, instruction_ref> map_quant_ins;
    std::unordered_map<instruction_ref, std::size_t> map_index;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();

        // for the dot operator, there could be 2 or 3 input arguments
        // if the 3rd argument is available, convert it to an int32.
        std::vector<instruction_ref> converted_inputs;

        // process all inputs; if an input is fp32 or fp64, convert it
        // to int8 by adding a convert operator, then replace the
        // operator with the corresponding int8 version
        auto inputs = ins->inputs();
        std::vector<std::pair<float, float>> ins_quant_params;
        for(auto input : inputs)
        {
            // calculate the index of each instruction to be quantized
            if(map_index.count(input) == 0)
            {
                map_index[input] = quant_param_index++;
            }
            auto param = quant_params[map_index[input]];
            ins_quant_params.push_back(param);

            // In general, the target_type is int8, but for the dot
            // operation, if it has 3 inputs, then the last one should
            // be converted to int32_type
            shape::type_t quant_type = shape::int8_type;
            if(ins->name() == "dot" and inputs.size() == 3 and input == inputs.back())
            {
                quant_type = shape::int32_type;
            }

            auto s = input->get_shape();
            if((s.type() == shape::float_type or s.type() == shape::double_type or
                s.type() == shape::int32_type) and
               s.type() != quant_type)
            {
                // if the input is a convert operator whose own input
                // already has the quantized type, reuse that input
                instruction_ref quant_input{};
                if(input->name() == "convert")
                {
                    auto tmp_ins = input->inputs().front();
                    if(tmp_ins->get_shape().type() == quant_type)
                    {
                        quant_input = input->inputs().front();
                    }
                    else
                    {
                        quant_input = insert_quant_ins(
                            prog, input, quant_type, map_quant_ins, param.first, param.second);
                    }
                }
                else
                {
                    quant_input = insert_quant_ins(
                        prog, input, quant_type, map_quant_ins, param.first, param.second);
                }
                converted_inputs.push_back(quant_input);
            }
            else
            {
                converted_inputs.push_back(input);
            }
        }

        // none of the inputs changed, go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        // When converting from other types to int8_type, the inputs are
        // scaled and shifted (shift is 0.0f here), which generates results
        // different from the original ones. To adjust the output to be
        // approximately equal to the original, we need additional calculation.
        if(ins->name() == "dot")
        {
            auto dot_op = any_cast<op::dot>(ins->get_operator());
            float new_alpha =
                dot_op.alpha / (ins_quant_params[0].first * ins_quant_params[1].first);
            float new_beta = dot_op.beta;
            // We need an additional check on the adjusted values. If both
            // abs(new_alpha) and abs(new_beta) are at least the threshold
            // (a provisional value of 50), the relative rounding error is
            // small enough to convert them to integers for the quant_dot
            float threshold = 50.0f;
            if(std::fabs(new_alpha) >= threshold and std::fabs(new_beta) >= threshold)
            {
                int32_t quant_alpha = static_cast<int32_t>(new_alpha);
                int32_t quant_beta  = static_cast<int32_t>(new_beta);
                if(shape::int32_type == orig_type)
                {
                    prog.replace_instruction(
                        ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                }
                else
                {
                    auto quant_dot = prog.insert_instruction(
                        ins, op::quant_dot{quant_alpha, quant_beta}, converted_inputs);
                    prog.replace_instruction(ins, op::convert{orig_type}, quant_dot);
                }
            }
            // either alpha or beta cannot be quantized because the
            // relative rounding error would be too large
            else
            {
                if(converted_inputs.size() == 3)
                {
                    converted_inputs.pop_back();
                }
                auto q_dot   = prog.insert_instruction(ins, op::quant_dot{1, 0}, converted_inputs);
                auto f_dot   = prog.insert_instruction(ins, op::convert{shape::float_type}, q_dot);
                auto c_shape = q_dot->get_shape();
                std::vector<float> vec_alpha(c_shape.elements(), new_alpha);
                auto l_alpha =
                    prog.add_literal(literal({shape::float_type, c_shape.lens()}, vec_alpha));

                if(inputs.size() == 3 and dot_op.beta != 0.0f)
                {
                    auto alpha_ab = prog.insert_instruction(ins, op::mul{}, l_alpha, f_dot);
                    std::vector<float> vec_beta(c_shape.elements(), dot_op.beta);
                    auto l_beta =
                        prog.add_literal(literal({shape::float_type, c_shape.lens()}, vec_beta));
                    instruction_ref beta_c{};
                    if(orig_type != shape::float_type)
                    {
                        auto fp32_c = prog.insert_instruction(
                            ins, op::convert{shape::float_type}, inputs.back());
                        auto fp32_beta_c = prog.insert_instruction(ins, op::mul{}, l_beta, fp32_c);
                        beta_c = prog.insert_instruction(ins, op::convert{orig_type}, fp32_beta_c);
                    }
                    else
                    {
                        beta_c = prog.insert_instruction(ins, op::mul{}, l_beta, inputs.back());
                    }

                    if(orig_type == shape::float_type)
                    {
                        prog.replace_instruction(ins, op::add{}, alpha_ab, beta_c);
                    }
                    else
                    {
                        auto f_res = prog.insert_instruction(ins, op::add{}, alpha_ab, beta_c);
                        prog.replace_instruction(ins, op::convert{orig_type}, f_res);
                    }
                }
                else
                {
                    if(orig_type == shape::float_type)
                    {
                        prog.replace_instruction(ins, op::mul{}, l_alpha, f_dot);
                    }
                    else
                    {
                        auto alpha_ab = prog.insert_instruction(ins, op::mul{}, l_alpha, f_dot);
                        prog.replace_instruction(ins, op::convert{orig_type}, alpha_ab);
                    }
                }
            }
        }
        else if(ins->name() == "convolution")
        {
            // Current MIOpen convolution does not support alpha and beta,
            // so we need a separate multiply to adjust the output
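            // With input scales s1 and s2, the quantized convolution computes
            // conv(s1 * X, s2 * W), so its output is scaled by 1 / (s1 * s2)
            // to recover conv(X, W).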
            auto conv_op       = any_cast<op::convolution>(ins->get_operator());
            auto padding       = conv_op.padding;
            auto stride        = conv_op.stride;
            auto dilation      = conv_op.dilation;
            auto padding_mode  = conv_op.padding_mode;
            auto group         = conv_op.group;
            auto adjust_factor = 1.0f / (ins_quant_params[0].first * ins_quant_params[1].first);

            auto quant_conv = prog.insert_instruction(
                ins,
                op::quant_convolution{padding, stride, dilation, padding_mode, group},
                converted_inputs);
            float threshold = 50.0f;
            std::vector<float> vec_factor(quant_conv->get_shape().elements(), adjust_factor);
            if(quant_conv->get_shape().type() == orig_type and adjust_factor >= threshold)
            {
                auto l_factor = prog.add_literal(
                    literal(quant_conv->get_shape(), vec_factor.begin(), vec_factor.end()));
                prog.replace_instruction(ins, op::mul{}, quant_conv, l_factor);
            }
            // convert the quant_conv output to float type, multiply by the
            // factor, and convert back to the original type
            else
            {
                auto float_conv =
                    prog.insert_instruction(ins, op::convert{shape::float_type}, quant_conv);
                auto l_factor = prog.add_literal(literal(float_conv->get_shape(), vec_factor));
                if(orig_type == shape::float_type)
                {
                    prog.replace_instruction(ins, op::mul{}, l_factor, float_conv);
                }
                else
                {
                    auto adjusted_conv =
                        prog.insert_instruction(ins, op::mul{}, l_factor, float_conv);
                    prog.replace_instruction(ins, op::convert{orig_type}, adjusted_conv);
                }
            }
        }
        else
        {
            MIGRAPHX_THROW("QUANTIZE_INT8: does not support operator" + ins->name());
        }
    }

    if(quant_param_index != quant_params.size())
    {
        MIGRAPHX_THROW("QUANTIZE_INT8: number of scales does not match");
    }
}

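// Quantize a program to int8 using calibration data: a copy of the program
// is instrumented with capture operators, compiled for the target, and run
// on the calibration inputs to derive per-argument quantization parameters,
// which are then used to quantize the original program.
//
// A hypothetical usage sketch (assumes a target `t` and one recorded
// parameter_map `m` of calibration inputs):
//
//   std::vector<program::parameter_map> data = {m};
//   quantize_int8(p, t, data); // quantizes dot and convolution by default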
void quantize_int8(program& prog,
                   const target& t,
                   std::vector<program::parameter_map>& calibration_args,
                   const std::vector<std::string>& ins_names)
{
    // insert capture operators
    auto cap_prog          = prog;
    auto int8_quant_params = capture_arguments(cap_prog, t, ins_names);

    // compile the instrumented program so it can be evaluated on the target
    cap_prog.compile(t);

    // use all calibration data to run the program to calculate the
    // quantization scale and shift
    for(auto&& arg : calibration_args)
    {
        program::parameter_map m;
        for(auto&& x : cap_prog.get_parameter_shapes())
        {
            if(arg.count(x.first) > 0)
            {
                assert(x.second == arg[x.first].get_shape());
                m[x.first] = t.copy_to(arg[x.first]);
            }
            else
            {
                m[x.first] = t.allocate(x.second);
            }
        }
        cap_prog.eval(m);
    }

    quantize_int8(prog, ins_names, *int8_quant_params);
}

void quantize_int8(program& prog,
                   const target& t,
                   std::vector<program::parameter_map>& calibration_args)
{
    std::vector<std::string> ins_names = {"dot", "convolution"};
    quantize_int8(prog, t, calibration_args, ins_names);
}

// For each input argument of the instructions to be quantized, we insert
// a capture operator to compute the scale and shift
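//
// A hypothetical callback sketch: func is invoked at evaluation time with
// the index assigned to the captured argument and the observed values:
//
//   auto record = [](std::size_t ins_index, std::vector<argument> args) {
//       // e.g. inspect args.front() to derive a scale for ins_index
//   };
//   std::size_t n = capture_arguments(prog, {"dot"}, record);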
std::size_t capture_arguments(program& prog,
                              const std::vector<std::string>& ins_names,
                              const std::function<void(std::size_t, std::vector<argument>)>& func)
{

    size_t num_quant_params = 0;
    // the int8 quantization only supports dot and convolution
    std::vector<std::string> op_names = {"dot", "convolution", "quant_dot", "quant_convolution"};
    if(!std::all_of(ins_names.begin(), ins_names.end(), [&](auto name) {
           return std::find(op_names.begin(), op_names.end(), name) != op_names.end();
       }))
    {
        MIGRAPHX_THROW("CAPTURE_ARGUMENTS: input operator is not supported");
    }

    std::unordered_map<instruction_ref, instruction_ref> ins_map;
    for(auto ins : iterator_for(prog))
    {
        if(not contains(ins_names, ins->name()))
        {
            continue;
        }

        auto inputs = ins->inputs();
        std::vector<instruction_ref> new_args;
        for(auto input : inputs)
        {
            instruction_ref new_ins{};
            if(ins_map.count(input) > 0)
            {
                new_ins = ins_map[input];
            }
            else
            {
                new_ins = prog.insert_instruction(
                    std::next(input), op::capture{num_quant_params++, func}, input);
                ins_map[input] = new_ins;
            }
            new_args.push_back(new_ins);
        }
        instruction::replace(ins, ins->get_operator(), ins->get_shape(), new_args);
    }

    return num_quant_params;
}

std::shared_ptr<std::vector<std::pair<float, float>>>
capture_arguments_impl(program& prog, const target& t, const std::vector<std::string>& ins_names)
{
    std::shared_ptr<std::vector<std::pair<float, float>>> int8_quant_params =
        std::make_shared<std::vector<std::pair<float, float>>>();
    std::shared_ptr<std::vector<float>> max_abs_vals = std::make_shared<std::vector<float>>();

    auto calc_quant_params = [int8_quant_params, max_abs_vals, &t](std::size_t ins_index,
                                                                   std::vector<argument> args) {
        std::pair<float, float> param_pair{64.0f, 0.0f};

        // scale and shift are needed only for the int8 type; we do not
        // consider shift, so it is set to 0
        std::vector<float> vec_val;
        argument arg = t.copy_from(args.front());
        arg.visit([&](auto output) { vec_val.assign(output.begin(), output.end()); });
        auto max_val                = *std::max_element(vec_val.begin(), vec_val.end());
        auto min_val                = *std::min_element(vec_val.begin(), vec_val.end());
        auto max_abs                = std::max(std::fabs(max_val), std::fabs(min_val));
        max_abs_vals->at(ins_index) = std::max(max_abs_vals->at(ins_index), max_abs);

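        // Map the largest magnitude seen so far onto the int8 range; e.g. a
        // running max-abs of 2.54 yields a scale of 127.0f / 2.54 = 50.0f.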
        param_pair.first                 = 127.0f / max_abs_vals->at(ins_index);
        int8_quant_params->at(ins_index) = param_pair;
    };

    auto num_params = capture_arguments(prog, ins_names, calc_quant_params);

    int8_quant_params->resize(num_params, std::pair<float, float>(64.0f, 0.0f));
    max_abs_vals->resize(num_params, 0.0f);

    return int8_quant_params;
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx