#include <iterator>

#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/int8_gemm_pack.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/program.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/permutation.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

Shucai Xiao's avatar
Shucai Xiao committed
17
static instruction_ref pad_ins(module& m, instruction_ref ins, int offset)
18
{
Shucai Xiao's avatar
Shucai Xiao committed
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
    auto s                         = ins->get_shape();
    auto lens                      = s.lens();
    auto k                         = lens[lens.size() + offset];
    auto pad_k                     = (k + 3) / 4 * 4;
    auto pad_lens                  = lens;
    pad_lens[lens.size() + offset] = pad_k;
    std::vector<int64_t> pad_dims(lens.size() * 2, 0);
    auto ret_ins = ins;
    if(pad_k != k)
    {
        pad_dims[lens.size() + offset] = pad_k - k;
        shape ps{s.type(), pad_lens};
        auto ins_out =
            m.insert_instruction(ins, make_op("hip::allocate", {{"shape", to_value(ps)}}));
        auto pad = make_op("pad", {{"pads", pad_dims}});
        ret_ins =
            m.insert_instruction(std::next(ins), make_op("gpu::pad", pad.to_value()), ins, ins_out);
    }

    return ret_ins;
}

// Pad the k dimension of both gemm arguments of `ins` to a multiple of 4.
// For a transposed input produced by a visible "transpose" op, the pad is
// applied to the transpose's source (at the permuted axis) and the transpose
// is re-applied; otherwise the input is made contiguous first (if needed) and
// padded directly. Remaining inputs (beyond A and B) are passed through.
static std::vector<instruction_ref> pad_inputs(module& m, instruction_ref ins)
{
    std::vector<instruction_ref> padded;
    auto inputs = ins->inputs();

    // Pads one gemm argument. `perm_pos_from_back` selects which entry of the
    // permutation locates the k axis when the input is transposed;
    // `def_offset` is the k axis (counted from the back) otherwise.
    auto pad_arg = [&](instruction_ref arg, std::size_t perm_pos_from_back, int def_offset) {
        auto arg_shape = arg->get_shape();
        if(not arg_shape.transposed())
        {
            padded.push_back(pad_ins(m, arg, def_offset));
            return;
        }
        auto perm = find_permutation(arg_shape);
        auto val  = arg->get_operator().to_value();
        if(val.contains("dims"))
        {
            // Pad the transpose's input at the permuted k axis, then redo the
            // transpose with the original permutation.
            int offset = static_cast<int>(perm[perm.size() - perm_pos_from_back]) -
                         static_cast<int>(perm.size());
            auto t_in = arg->inputs().front();
            auto p_in = pad_ins(m, t_in, offset);
            auto dims = val.at("dims").to_vector<int64_t>();
            auto r_in =
                m.insert_instruction(ins, make_op("transpose", {{"permutation", dims}}), p_in);
            padded.push_back(r_in);
        }
        else
        {
            // No visible transpose op: materialize a contiguous copy, then pad.
            shape cs{arg_shape.type(), arg_shape.lens()};
            auto alloc =
                m.insert_instruction(ins, make_op("hip::allocate", {{"shape", to_value(cs)}}));
            auto cont = m.insert_instruction(ins, make_op("gpu::contiguous"), arg, alloc);
            padded.push_back(pad_ins(m, cont, def_offset));
        }
    };

    pad_arg(inputs.at(0), 1, -1); // A: k is the last axis
    pad_arg(inputs.at(1), 2, -2); // B: k is the second-to-last axis
    std::copy(inputs.begin() + 2, inputs.end(), std::back_inserter(padded));

    return padded;
}

// Rewrite int8 gemm/convolution instructions so their arguments satisfy the
// backend's int8x4 packing requirements: k (gemm) and the channel dim
// (convolution, see pack_int8_shape) must be multiples of 4, and the packed
// layouts are produced by the gpu::int8_*_pack ops.
void pack_int8_args::apply(module& m) const
{
    for(auto ins : iterator_for(m))
    {
        if(ins->name() == "gpu::quant_gemm")
        {
            auto val = ins->get_operator().to_value();
            assert(val.contains("int8_x4_format"));
            if(not val.at("int8_x4_format").to<bool>())
            {
                // BUGFIX: was `return;`, which aborted the entire pass and left
                // any later quant_gemm/quant_convolution instructions in the
                // module unpacked. Only this instruction should be skipped.
                continue;
            }
            auto inputs = ins->inputs();
            auto lens   = inputs.at(0)->get_shape().lens();
            // gemm needs k (the last dim of A) to be a multiple of 4, so pad
            // that dimension when necessary.
            auto old_inputs = inputs;
            if((lens.back() % 4) != 0)
            {
                inputs = pad_inputs(m, ins);
            }

            bool transa = inputs[0]->get_shape().transposed();
            bool transb = inputs[1]->get_shape().transposed();
            // NOTE(review): pack_a is applied to inputs[1] and pack_b to
            // inputs[0]; presumably the gemm kernel swaps the A/B argument
            // order (column-major convention) — confirm against the kernel.
            if(not transb)
            {
                auto packed_b = m.insert_instruction(
                    ins, make_op("hip::allocate", {{"shape", to_value(inputs[1]->get_shape())}}));
                auto output_b = m.insert_instruction(
                    ins, make_op("gpu::int8_gemm_pack_a"), {inputs[1], packed_b});
                inputs[1] = output_b;
            }

            if(transa)
            {
                auto packed_a = m.insert_instruction(
                    ins, make_op("hip::allocate", {{"shape", to_value(inputs[0]->get_shape())}}));
                auto output_a = m.insert_instruction(
                    ins, make_op("gpu::int8_gemm_pack_b"), {inputs[0], packed_a});
                inputs[0] = output_a;
            }

            if(inputs != old_inputs)
            {
                m.replace_instruction(ins, ins->get_operator(), inputs);
            }
        }
        else if(ins->name() == "gpu::quant_convolution")
        {
            // Pack both the data (inputs[0]) and the weights (inputs[1]) into
            // the channel-padded layout computed by pack_int8_shape.
            auto inputs   = ins->inputs();
            auto packed_x = m.insert_instruction(
                ins,
                make_op("hip::allocate",
                        {{"shape", to_value(pack_int8_shape(inputs[0]->get_shape()))}}));
            auto output_x =
                m.insert_instruction(ins, make_op("gpu::int8_conv_pack"), {inputs[0], packed_x});
            instruction::replace_argument(ins, inputs[0], output_x);

            auto packed_w = m.insert_instruction(
                ins,
                make_op("hip::allocate",
                        {{"shape", to_value(pack_int8_shape(inputs[1]->get_shape()))}}));
            auto output_w =
                m.insert_instruction(ins, make_op("gpu::int8_conv_pack"), {inputs[1], packed_w});
            instruction::replace_argument(ins, inputs[1], output_w);
        }
    }
}

179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
shape pack_int8_args::pack_int8_shape(const shape& s) const
{
    if(s.type() != shape::int8_type)
    {
        MIGRAPHX_THROW("PACK_INT8_ARGS: only process int8_type");
    }

    auto lens    = s.lens();
    auto strides = s.strides();
    lens[1]      = (lens[1] + 3) / 4 * 4;
    strides[0]   = strides[1] * lens[1];

    return {s.type(), lens, strides};
}

194
195
196
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx