#include <migraphx/gpu/quant_gemm.hpp>
#include <migraphx/gpu/device/pack.hpp>
#include <migraphx/gpu/context.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

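// Thin wrappers that forward to the rocBLAS extended GEMM entry points, so the
// single-matrix and strided-batched code paths below can be written uniformly.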
template <class... Ts>
rocblas_status generic_rocblas_gemm_ex(Ts&&... xs)
{
    return rocblas_gemm_ex(std::forward<Ts>(xs)...);
}

template <class... Ts>
rocblas_status generic_rocblas_batched_gemm_ex(Ts&&... xs)
{
    return rocblas_gemm_strided_batched_ex(std::forward<Ts>(xs)...);
}

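// Map MIGraphX types to the corresponding rocBLAS types; half maps to
// rocblas_half, every other type maps to itself.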
template <class T>
struct compute_rocblas_type
{
    using type = T;
};

template <class T>
struct compute_rocblas_type<const T>
{
    using type = const typename compute_rocblas_type<T>::type;
};

template <>
struct compute_rocblas_type<half>
{
    using type = rocblas_half;
};

template <class T>
using rb_type = typename compute_rocblas_type<T>::type;

template <class T>
rb_type<T> to_rocblas_type(T x)
{
    return reinterpret_cast<const rb_type<T>&>(x);
}

template <class T>
rb_type<T>* to_rocblas_type(T* x)
{
    return reinterpret_cast<rb_type<T>*>(x);
}

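// The trailing shapes are the output allocation plus extra buffers appended for
// the int8 packing of A (when A is transposed) and of B (when B is not
// transposed); strip them before delegating to the operator's compute_shape.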
shape miopen_quant_gemm::compute_shape(const std::vector<shape>& inputs) const
{
    std::vector<shape> input_shapes(inputs);
    if(!inputs.at(1).transposed())
    {
        input_shapes.pop_back();
    }
    if(inputs.at(0).transposed())
    {
        input_shapes.pop_back();
    }
    input_shapes.pop_back();

    check_shapes{input_shapes}.not_broadcasted();
    return op.compute_shape(input_shapes);
}

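// Perform the int8 gemm: handle the packing of A and/or B using the extra
// trailing arguments when needed, then dispatch to rocblas_gemm_ex or its
// strided-batched variant.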
argument miopen_quant_gemm::compute(context& ctx,
                                    const shape& output_shape,
                                    const std::vector<argument>& args) const
{
    // handling the packing of B MUST be before handling that for A
    bool transa     = args[0].get_shape().transposed();
    bool transb     = args[1].get_shape().transposed();
    auto n_dim      = output_shape.lens().size();
    auto dim_1      = n_dim - 1;
    auto dim_0      = n_dim - 2;
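    // leading dimensions come from the strides of the row-major shapes; the
    // row/column-major mismatch with rocBLAS is compensated for in the call below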
    rocblas_int lda = args[0].get_shape().strides()[transa ? dim_1 : dim_0];
    rocblas_int ldb = args[1].get_shape().strides()[transb ? dim_1 : dim_0];
    rocblas_int ldc = args[2].get_shape().strides()[dim_0];

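    // addi_ref_num counts how many trailing arguments are used for packing, so
    // the packing buffer for A and the real output can be located below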
    size_t addi_ref_num = 0;
    if(!transb)
    {
        ++addi_ref_num;
        const argument& arg_b = args[args.size() - 1];
        // the buffer for packing B is the last entry in the input argument vector;
        // as noted in the packing API's comment, the pack_a algorithm applies to B
        device::pack_a(ctx.get_stream().get(), args[1], arg_b);
    }

    // A needs to be packed in this case; as noted in the packing API's comment,
    // the pack_b algorithm is the one that applies to A
    if(transa)
    {
        ++addi_ref_num;
        const argument& arg_a = args[args.size() - 1 - addi_ref_num];
        device::pack_b(ctx.get_stream().get(), args[0], arg_a);
    }

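    // discounting the packing arguments, four remaining arguments mean the gemm
    // has an explicit C input (A, B, C, output), so beta is taken from the op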
    bool is_3inputs = (args.size() - addi_ref_num == 4);
    int8_t beta     = 0;
    if(is_3inputs)
    {
        beta = op.beta;
    }

    auto a_lens = args[0].get_shape().lens();
    auto b_lens = args[1].get_shape().lens();
    output_shape.visit_type([&](auto as) {
        auto alpha_r    = to_rocblas_type(as(op.alpha));
        auto beta_r     = to_rocblas_type(as(beta));
        auto out_lens   = output_shape.lens();
        rocblas_int m   = out_lens[dim_0];
        rocblas_int n   = out_lens[dim_1];
        rocblas_int k   = args[0].get_shape().lens()[dim_1];
        auto to_pointer = [&](auto&& arg) { return to_rocblas_type(as.from(arg.data())); };
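        // the rocblas int8 gemm requires the k dimension to be a multiple of 4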
        assert(k % 4 == 0);

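        // all dimensions in front of the last two are treated as a batch of
        // independent matrix multiplies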
        auto num_matrices = std::accumulate(
            out_lens.rbegin() + 2, out_lens.rend(), std::size_t{1}, std::multiplies<std::size_t>());
        if(num_matrices == 1)
        {
            // the rocblas_gemm API handles inputs and output matrices as
            // column-major format. When doing a C = A * B, we actually do
            // C^T = (B^T) * (A^T). That is the reason we input args[1] as
            // A and args[0] as B in calling the rocblas_gemm.
            generic_rocblas_gemm_ex(ctx.get_stream().get_rocblas(),
                                    transb ? rocblas_operation_transpose : rocblas_operation_none,
                                    transa ? rocblas_operation_transpose : rocblas_operation_none,
                                    n,
                                    m,
                                    k,
                                    &alpha_r,
                                    to_pointer(args[1]),
                                    rocblas_datatype_i8_r,
                                    ldb,
                                    to_pointer(args[0]),
                                    rocblas_datatype_i8_r,
                                    lda,
                                    &beta_r,
                                    to_pointer(args[2]),
                                    rocblas_datatype_i32_r,
                                    ldc,
                                    (is_3inputs ? to_pointer(args[3]) : to_pointer(args[2])),
                                    rocblas_datatype_i32_r,
                                    ldc,
                                    rocblas_datatype_i32_r,
                                    rocblas_gemm_algo_standard,
                                    0,
                                    0,
                                    nullptr,
                                    nullptr);
        }
        else
        {
            generic_rocblas_batched_gemm_ex(
                ctx.get_stream().get_rocblas(),
                transb ? rocblas_operation_transpose : rocblas_operation_none,
                transa ? rocblas_operation_transpose : rocblas_operation_none,
                n,
                m,
                k,
                &alpha_r,
                to_pointer(args[1]),
                rocblas_datatype_i8_r,
                ldb,
                k * n,
                to_pointer(args[0]),
                rocblas_datatype_i8_r,
                lda,
                m * k,
                &beta_r,
                to_pointer(args[2]),
                rocblas_datatype_i32_r,
                ldc,
                m * n,
                (is_3inputs ? to_pointer(args[3]) : to_pointer(args[2])),
                rocblas_datatype_i32_r,
                ldc,
                m * n,
                num_matrices,
                rocblas_datatype_i32_r,
                rocblas_gemm_algo_standard,
                0,
                0,
                nullptr,
                nullptr);
        }
    });

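    // the result was written to the last non-packing argument, which is the
    // output allocation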
    return (is_3inputs ? args[3] : args[2]);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx