quant_gemm.cpp 6.33 KB
Newer Older
Shucai Xiao's avatar
Shucai Xiao committed
1
#include <migraphx/gpu/quant_gemm.hpp>
2
#include <migraphx/gpu/device/pack.hpp>
Shucai Xiao's avatar
Shucai Xiao committed
3
4
5
6
7
8
9
#include <migraphx/gpu/context.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

template <class... Ts>
10
rocblas_status generic_rocblas_gemm_ex(Ts&&... xs)
Shucai Xiao's avatar
Shucai Xiao committed
11
{
12
    return rocblas_gemm_ex(std::forward<Ts>(xs)...);
Shucai Xiao's avatar
Shucai Xiao committed
13
14
15
}

template <class... Ts>
16
rocblas_status generic_rocblas_batched_gemm_ex(Ts&&... xs)
Shucai Xiao's avatar
Shucai Xiao committed
17
{
18
    return rocblas_gemm_strided_batched_ex(std::forward<Ts>(xs)...);
Shucai Xiao's avatar
Shucai Xiao committed
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
}

// Metafunction mapping a migraphx element type to the type the rocBLAS API
// expects; the primary template passes the type through unchanged.
template <class T>
struct compute_rocblas_type
{
    using type = T;
};

// Partial specialization: preserve const-qualification across the mapping.
template <class T>
struct compute_rocblas_type<const T>
{
    using type = const typename compute_rocblas_type<T>::type;
};

// migraphx half maps to rocblas_half (assumed layout-compatible — the
// to_rocblas_type helpers below reinterpret between them).
template <>
struct compute_rocblas_type<half>
{
    using type = rocblas_half;
};

// Convenience alias for compute_rocblas_type<T>::type.
template <class T>
using rb_type = typename compute_rocblas_type<T>::type;

// Reinterpret a value as its rocBLAS counterpart. Relies on T and rb_type<T>
// being layout-compatible (identical size/representation, e.g. half vs
// rocblas_half); for types where rb_type<T> == T this is an identity copy.
template <class T>
rb_type<T> to_rocblas_type(T x)
{
    return reinterpret_cast<const rb_type<T>&>(x);
}

// Pointer overload: reinterpret a pointer to T as a pointer to the
// layout-compatible rocBLAS type, without copying the pointee.
template <class T>
rb_type<T>* to_rocblas_type(T* x)
{
    return reinterpret_cast<rb_type<T>*>(x);
}

// Validate the operand shapes and delegate to the wrapped op. The trailing
// input is excluded from validation (it is handled separately in compute).
shape miopen_quant_gemm::compute_shape(const std::vector<shape>& inputs) const
{
    std::vector<shape> operand_shapes(inputs.begin(), inputs.end() - 1);
    check_shapes{operand_shapes}.not_broadcasted();
    return op.compute_shape(operand_shapes);
}

// Run an int8 GEMM (int32 accumulate/output) through rocBLAS.
//
// args layout: A, B, C-or-output, [output], then trailing workspace
// arguments used to hold packed copies of the int8 inputs (rocBLAS requires
// int8 operands packed, hence the k % 4 == 0 assertion below).
//
// Returns the output argument (args[3] when an explicit C operand is
// present, otherwise args[2]).
argument miopen_quant_gemm::compute(context& ctx,
                                    const shape& output_shape,
                                    const std::vector<argument>& args) const
{
    // handling the packing of B MUST be before handling that for A
    bool transa     = args[0].get_shape().transposed();
    bool transb     = args[1].get_shape().transposed();
    auto n_dim      = output_shape.lens().size();
    auto dim_1      = n_dim - 1;
    auto dim_0      = n_dim - 2;
    // leading dimensions come from the strides of the (possibly transposed)
    // operands; ldc from the output's row stride
    rocblas_int lda = args[0].get_shape().strides()[transa ? dim_1 : dim_0];
    rocblas_int ldb = args[1].get_shape().strides()[transb ? dim_1 : dim_0];
    rocblas_int ldc = args[2].get_shape().strides()[dim_0];

    // number of trailing workspace arguments consumed for packing
    size_t addi_ref_num = 0;
    if(!transb)
    {
        ++addi_ref_num;
        const argument& arg_b = args[args.size() - 1];
        // argument for B is the last one in the input argument vector
        // use the algorithm to pack A
        device::pack_a(ctx.get_stream().get(), args[1], arg_b);
    }

    // need to pack A in this scenario, use the algorithm to pack B in the
    // comment of the API
    if(transa)
    {
        ++addi_ref_num;
        // NOTE(review): addi_ref_num is incremented before computing this
        // index, so when B was also packed this reads args[size() - 3],
        // skipping args[size() - 2] — verify against the order in which the
        // workspace buffers are appended by the caller.
        const argument& arg_a = args[args.size() - 1 - addi_ref_num];
        device::pack_b(ctx.get_stream().get(), args[0], arg_a);
    }

    // with an explicit C operand there are 4 non-workspace args: A, B, C, out
    bool is_3inputs = (args.size() - addi_ref_num == 4);
    int8_t beta     = 0;
    if(is_3inputs)
    {
        beta = op.beta;
    }

    output_shape.visit_type([&](auto as) {
        auto alpha_r    = to_rocblas_type(as(op.alpha));
        auto beta_r     = to_rocblas_type(as(beta));
        auto out_lens   = output_shape.lens();
        rocblas_int m   = out_lens[dim_0];
        rocblas_int n   = out_lens[dim_1];
        rocblas_int k   = args[0].get_shape().lens()[dim_1];
        auto to_pointer = [&](auto&& arg) { return to_rocblas_type(as.from(arg.data())); };
        // rocBLAS int8 gemm requires k to be a multiple of 4 (packed layout)
        assert(k % 4 == 0);

        // product of all leading (batch) dimensions of the output
        auto num_matrices = std::accumulate(
            out_lens.rbegin() + 2, out_lens.rend(), std::size_t{1}, std::multiplies<std::size_t>());
        if(num_matrices == 1)
        {
            // the rocblas_gemm API handles inputs and output matrices as
            // column-major format. When doing a C = A * B, we actually do
            // C^T = (B^T) * (A^T). That is the reason we input args[1] as
            // A and args[0] as B in calling the rocblas_gemm.
            generic_rocblas_gemm_ex(ctx.get_stream().get_rocblas(),
                                    transb ? rocblas_operation_transpose : rocblas_operation_none,
                                    transa ? rocblas_operation_transpose : rocblas_operation_none,
                                    n,
                                    m,
                                    k,
                                    &alpha_r,
                                    to_pointer(args[1]),
                                    rocblas_datatype_i8_r,
                                    ldb,
                                    to_pointer(args[0]),
                                    rocblas_datatype_i8_r,
                                    lda,
                                    &beta_r,
                                    to_pointer(args[2]),
                                    rocblas_datatype_i32_r,
                                    ldc,
                                    (is_3inputs ? to_pointer(args[3]) : to_pointer(args[2])),
                                    rocblas_datatype_i32_r,
                                    ldc,
                                    rocblas_datatype_i32_r,
                                    rocblas_gemm_algo_standard,
                                    0,
                                    0,
                                    nullptr,
                                    nullptr);
        }
        else
        {
            // batched case: same column-major swap of A/B, with per-matrix
            // strides k*n (B), m*k (A), m*n (C/D)
            generic_rocblas_batched_gemm_ex(
                ctx.get_stream().get_rocblas(),
                transb ? rocblas_operation_transpose : rocblas_operation_none,
                transa ? rocblas_operation_transpose : rocblas_operation_none,
                n,
                m,
                k,
                &alpha_r,
                to_pointer(args[1]),
                rocblas_datatype_i8_r,
                ldb,
                k * n,
                to_pointer(args[0]),
                rocblas_datatype_i8_r,
                lda,
                m * k,
                &beta_r,
                to_pointer(args[2]),
                rocblas_datatype_i32_r,
                ldc,
                m * n,
                (is_3inputs ? to_pointer(args[3]) : to_pointer(args[2])),
                rocblas_datatype_i32_r,
                ldc,
                m * n,
                num_matrices,
                rocblas_datatype_i32_r,
                rocblas_gemm_algo_standard,
                0,
                0,
                nullptr,
                nullptr);
        }
    });

    return (is_3inputs ? args[3] : args[2]);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx