quantization.cpp 6.64 KB
Newer Older
Shucai Xiao's avatar
Shucai Xiao committed
1
#include <migraphx/quantization.hpp>
2
3
4
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
5
#include <migraphx/op/convert.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
6
7
8
#include <migraphx/op/dot.hpp>
#include <migraphx/op/mul.hpp>
#include <migraphx/op/add.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
9
#include <migraphx/op/quant_dot.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
10
#include <migraphx/op/capture.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
11
#include <migraphx/op/convolution.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
12
#include <migraphx/op/quant_convolution.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
13
#include <migraphx/op/multibroadcast.hpp>
14
#include <migraphx/stringutils.hpp>
15
#include <migraphx/ranges.hpp>
16
17
18
19
20
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

Shucai Xiao's avatar
Shucai Xiao committed
21
22
23
24
25
26
// Insert a convert instruction immediately after `ins` that casts its output
// to `type`, memoizing the result in `map_ins` so each instruction is
// converted at most once. `scale`/`shift` are forwarded to op::convert
// (used by the int8 path; the fp16 path uses the defaults).
instruction_ref insert_quant_ins(program& prog,
                                 instruction_ref& ins,
                                 shape::type_t type,
                                 std::unordered_map<instruction_ref, instruction_ref>& map_ins,
                                 float scale = 1.0f,
                                 float shift = 0.0f)
{
    // reuse a previously inserted convert for this instruction (single lookup
    // instead of count() followed by operator[])
    auto it = map_ins.find(ins);
    if(it != map_ins.end())
    {
        return it->second;
    }

    // "undefined" placeholders carry no data, so there is nothing to convert
    if(ins->name() == "undefined")
    {
        return ins;
    }

    // only these source types are expected to be quantized
    assert(ins->get_shape().type() == shape::float_type ||
           ins->get_shape().type() == shape::double_type ||
           ins->get_shape().type() == shape::int32_type);
    auto quant_ins = prog.insert_instruction(std::next(ins), op::convert{type, scale, shift}, ins);
    map_ins[ins]   = quant_ins;

    return quant_ins;
}

Shucai Xiao's avatar
Shucai Xiao committed
48
49
50
51
52
// This function is to convert any instructions specified in the input
// from double or float to float16 by inserting a convert operator.
// For the conversion, there could be cases of overflowing, but it
// is very rare in the area of deeping learning, so we just do a
// truncate of the input to get the fp16.
//
// `ins_names` selects which operator names to convert; the special name
// "all" converts every instruction in the program.
void quantize(program& prog, const std::vector<std::string>& ins_names)
{
    // memoization map shared across the whole pass: original instruction ->
    // its inserted fp16 convert, so a value feeding several selected
    // instructions is converted only once
    std::unordered_map<instruction_ref, instruction_ref> map_fp16;
    for(auto ins : iterator_for(prog))
    {
        // all indicates every instruction is converted
        if((not contains(ins_names, "all")) and (not contains(ins_names, ins->name())))
        {
            continue;
        }

        shape::type_t orig_type = ins->get_shape().type();
        // process all inputs, if input is a fp32 or fp64, convert it
        // to a fp16 by adding a convert operator.
        auto inputs = ins->inputs();
        std::vector<instruction_ref> converted_inputs;
        for(auto input : inputs)
        {
            auto s = input->get_shape();
            if(s.type() == shape::float_type || s.type() == shape::double_type)
            {
                // if the input is a convert operator, uses its input
                // as its current input
                // NOTE(review): this assumes the convert's own input is
                // already fp16 (i.e. the convert was an fp16->fp32 cast
                // inserted by this pass) — confirm no other convert shapes
                // reach here.
                instruction_ref input_fp16{};
                if(input->name() == "convert")
                {
                    input_fp16 = input->inputs().front();
                }
                else
                {
                    input_fp16 = insert_quant_ins(prog, input, shape::half_type, map_fp16);
                }
                converted_inputs.push_back(input_fp16);
            }
            else
            {
                // non-float inputs (e.g. integer types) are passed through
                converted_inputs.push_back(input);
            }
        }

        // no change for the input, go to the next instruction
        if(inputs == converted_inputs)
        {
            continue;
        }

        auto op        = ins->get_operator();
        auto ins_shape = compute_shape(op, converted_inputs);
        if(ins_shape.type() != orig_type)
        {
            // the rewritten instruction now produces a different type than
            // the original, so downstream users need a convert back to the
            // original type inserted after it.
            // check the dead code case to avoid assert
            bool output_empty = ins->outputs().empty();
            auto ins_orig_type =
                prog.insert_instruction(std::next(ins), op::convert{orig_type}, ins);
            if(!output_empty)
            {
                // redirect all users of `ins` to the converted-back value
                prog.replace_instruction(ins, ins_orig_type);
            }
        }

        // finally swap the instruction's operator/inputs for the fp16 ones;
        // ordering matters: the convert-back above still references `ins`
        prog.replace_instruction(ins, op, converted_inputs);
    }
}

Shucai Xiao's avatar
Shucai Xiao committed
117
// Convenience overload: quantize every instruction in the program to fp16.
void quantize(program& prog)
{
    quantize(prog, {"all"});
}
Shucai Xiao's avatar
Shucai Xiao committed
118

Shucai Xiao's avatar
Shucai Xiao committed
119
// Per-capture (scale, shift) pairs, indexed by the id each op::capture was
// created with; sized in capture_arguments and written by calc_quant_params.
// NOTE(review): mutable file-scope static shared across all programs — not
// thread-safe and not reset between runs; confirm single-threaded use.
static std::vector<std::pair<float, float>> int8_quant_params;
Shucai Xiao's avatar
Shucai Xiao committed
120

Shucai Xiao's avatar
Shucai Xiao committed
121
122
123
124
// function to compute the scale for each convert operator to convert to int8
void calc_quant_params(std::size_t ins_index, std::vector<migraphx::argument> args)
{
    std::pair<float, float> param_pair{1.0f, 0.0f};
Shucai Xiao's avatar
Shucai Xiao committed
125

Shucai Xiao's avatar
Shucai Xiao committed
126
127
128
129
130
131
132
133
    // scale and shift is need for only int8 type, and we do not
    // consider shift, so set shift to 0
    std::vector<float> vec_val;
    args.front().visit([&](auto output) { vec_val.assign(output.begin(), output.end()); });
    auto max_val     = *std::max_element(vec_val.begin(), vec_val.end());
    auto min_val     = *std::min_element(vec_val.begin(), vec_val.end());
    auto max_abs     = std::max(std::fabs(max_val), std::fabs(min_val));
    param_pair.first = 127.0f / max_abs;
Shucai Xiao's avatar
Shucai Xiao committed
134

Shucai Xiao's avatar
Shucai Xiao committed
135
136
    int8_quant_params[ins_index] = param_pair;
};
137

Shucai Xiao's avatar
Shucai Xiao committed
138
139
// For the input of each input argument, we need to insert a
// capture operator to compute the scale and shift
Shucai Xiao's avatar
Shucai Xiao committed
140
141
void capture_arguments(program& prog,
                       const std::vector<std::string>& ins_names,
Shucai Xiao's avatar
Shucai Xiao committed
142
                       std::function<void(std::size_t, std::vector<argument>)> func)
Shucai Xiao's avatar
Shucai Xiao committed
143
{
Shucai Xiao's avatar
Shucai Xiao committed
144
    size_t num_quant_params = 0;
Shucai Xiao's avatar
Shucai Xiao committed
145
    // the int8 quantization only support dot and convolution
Shucai Xiao's avatar
Shucai Xiao committed
146
    std::vector<std::string> op_names = {"dot", "convolution", "quant_dot", "quant_convolution"};
Shucai Xiao's avatar
Shucai Xiao committed
147
148
149
    if(!std::all_of(ins_names.begin(), ins_names.end(), [&](auto name) {
           return std::find(op_names.begin(), op_names.end(), name) != op_names.end();
       }))
Shucai Xiao's avatar
Shucai Xiao committed
150
151
152
153
154
155
156
    {
        MIGRAPHX_THROW("CAPTURE_ARGUMENTS: input operator is not supported");
    }

    std::unordered_map<instruction_ref, instruction_ref> ins_map;
    for(auto ins : iterator_for(prog))
    {
Shucai Xiao's avatar
Shucai Xiao committed
157
        if(not contains(ins_names, ins->name()))
Shucai Xiao's avatar
Shucai Xiao committed
158
159
160
161
162
163
        {
            continue;
        }

        auto inputs = ins->inputs();
        std::vector<instruction_ref> new_args;
Shucai Xiao's avatar
Shucai Xiao committed
164
        for(auto input : inputs)
Shucai Xiao's avatar
Shucai Xiao committed
165
166
        {
            instruction_ref new_ins{};
Shucai Xiao's avatar
Shucai Xiao committed
167
            if(ins_map.count(input) > 0)
Shucai Xiao's avatar
Shucai Xiao committed
168
169
170
171
172
            {
                new_ins = ins_map[input];
            }
            else
            {
Shucai Xiao's avatar
Shucai Xiao committed
173
                new_ins = prog.insert_instruction(
Shucai Xiao's avatar
Shucai Xiao committed
174
                    std::next(input), op::capture{num_quant_params++, func}, input);
Shucai Xiao's avatar
Shucai Xiao committed
175
176
177
178
179
180
                ins_map[input] = new_ins;
            }
            new_args.push_back(new_ins);
        }
        instruction::replace(ins, ins->get_operator(), ins->get_shape(), new_args);
    }
Shucai Xiao's avatar
Shucai Xiao committed
181
182
183
184
185
186
187
188

    // set one pair of parameter for each argument
    int8_quant_params.resize(num_quant_params, std::make_pair(-1.0f, -1.0f));
}

// Convenience overload: capture arguments using the default int8
// parameter-calculation callback.
void capture_arguments(program& prog, const std::vector<std::string>& ins_names)
{
    capture_arguments(prog, ins_names, &calc_quant_params);
}

191
192
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx