#include <rocblas.h>
#include <migraphx/gpu/gemm_impl.hpp>
#include <migraphx/reduce_dims.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

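// Map a migraphx shape element type to the rocBLAS datatype used for GEMM.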
rocblas_datatype get_type(shape::type_t type)
{
    switch(type)
    {
    case shape::double_type: return rocblas_datatype_f64_r;
    case shape::float_type: return rocblas_datatype_f32_r;
    case shape::half_type: return rocblas_datatype_f16_r;
    case shape::int8_type: return rocblas_datatype_i8_r;
    case shape::uint8_type: return rocblas_datatype_u8_r;
    case shape::int32_type: return rocblas_datatype_i32_r;
    case shape::uint32_type: return rocblas_datatype_u32_r;
    case shape::tuple_type:
    case shape::bool_type:
    case shape::uint16_type:
    case shape::int16_type:
    case shape::int64_type:
    case shape::uint64_type: MIGRAPHX_THROW("ROCBLAS_GEMM: data type not supported!");
    }

    MIGRAPHX_THROW("ROCBLAS_GEMM: data type not supported!");
}

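// Check that a shape is usable by rocBLAS GEMM: one of the two innermost
// (matrix) strides must be 1, and any leading batch dimensions must collapse
// to a single dimension so the strided-batched API can address them.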
void blas_shape(const shape& s)
{
    if(s.lens().size() < 2)
        return;
    if(std::none_of(s.strides().end() - 2, s.strides().end(), [&](auto i) { return i == 1; }))
        MIGRAPHX_THROW("GPU_GEMM: needs to have one matrix stride as 1");
    if(s.lens().size() < 3)
        return;
    shape batch_shape{s.type(),
                      {s.lens().begin(), s.lens().end() - 2},
                      {s.strides().begin(), s.strides().end() - 2}};
    auto batch_shapes = reduce_dims({batch_shape});
    if(batch_shapes.front().lens().size() != 1)
        MIGRAPHX_THROW("GPU_GEMM: Batch dimension is not collapsible");
}

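// Forward a call to a rocBLAS entry point. When fewer arguments are supplied
// than the function expects, two trailing null pointers are appended so the
// same call site works with rocBLAS versions whose gemm APIs take two extra
// trailing parameters.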
template <class R, class... Ts, class... Us>
R rocblas_invoke(R (*f)(Ts...), Us... xs)
{
    if constexpr(sizeof...(Ts) == sizeof...(Us))
        return f(xs...);
    else
        return f(xs..., nullptr, nullptr);
}

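// A shape is treated as transposed for rocBLAS only if it is marked as
// transposed and its innermost stride is not 1.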
static bool is_transposed(const shape& s)
{
    if(not s.transposed())
        return false;
    return s.strides().back() != 1;
}

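// Stride between consecutive matrices of a batched argument, i.e. the third
// stride counted from the innermost dimension.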
static rocblas_int get_batch_stride(const argument& a)
{
    return a.get_shape().strides()[a.get_shape().strides().size() - 3];
}

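// Dispatch a (batched) GEMM to rocBLAS. The alpha/beta scalars are templated
// so the same implementation serves the float and int32_t overloads below.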
template <class T>
void gemm_impl(context& ctx,
               const shape& output_shape,
               const std::vector<argument>& args,
               T alpha,
               T beta,
               bool int8_x4_format,
               bool compute_fp32)
{
    bool transa     = is_transposed(args[0].get_shape());
    bool transb     = is_transposed(args[1].get_shape());
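    // Leading dimensions as rocBLAS sees them: the stride of whichever matrix
    // dimension is not the unit-stride one.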
    auto n_dim      = output_shape.lens().size();
    auto dim_1      = n_dim - 1;
    auto dim_0      = n_dim - 2;
    rocblas_int lda = args[0].get_shape().strides()[transa ? dim_1 : dim_0];
    rocblas_int ldb = args[1].get_shape().strides()[transb ? dim_1 : dim_0];
    rocblas_int ldc = args[2].get_shape().strides()[dim_0];

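    // Without a separate C input, args[2] is only the output buffer, so beta
    // is forced to 0 and nothing is accumulated into the result.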
    bool is_3inputs = (args.size() == 4);
    if(!is_3inputs)
    {
        beta = 0;
    }
    rocblas_datatype arg_type = get_type(args[0].get_shape().type());
    auto output_type          = arg_type;
    if(output_type == rocblas_datatype_i8_r)
    {
        output_type = rocblas_datatype_i32_r;
    }
    auto compute_type = output_type;
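    // Optionally accumulate half-precision GEMMs in fp32 for better accuracy.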
    if(compute_fp32)
    {
        if(arg_type == rocblas_datatype_f16_r)
            compute_type = rocblas_datatype_f32_r;
    }

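    // rocBLAS >= 2.38 exposes a flag to request the packed int8x4 data
    // format; older versions have no such flag, so the request is dropped.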
#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
    rocblas_gemm_flags flag =
        int8_x4_format ? rocblas_gemm_flags_pack_int8x4 : rocblas_gemm_flags_none;
#else
    (void)int8_x4_format;
    int flag = 0;
#endif

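    // The type visitor below converts the alpha/beta scalars and the raw
    // argument pointers to the concrete element type expected by rocBLAS.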
    auto a_lens = args[0].get_shape().lens();
    auto b_lens = args[1].get_shape().lens();
    output_shape.visit_type([&](auto as) {
        auto alpha_r = as(alpha);
        auto beta_r  = as(beta);

        // rocBLAS takes alpha/beta through void pointers; in fp32 compute mode
        // pass the original fp32 scalars instead of the converted values
        void* alpha_v = &alpha_r;
        void* beta_v  = &beta_r;

        if(compute_fp32)
        {
            alpha_v = &alpha;
            beta_v  = &beta;
        }

        auto out_lens   = output_shape.lens();
        rocblas_int m   = out_lens[dim_0];
        rocblas_int n   = out_lens[dim_1];
        rocblas_int k   = args[0].get_shape().lens()[dim_1];
        auto to_pointer = [&](auto&& arg) { return as.from(arg.data()); };
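        // The packed int8x4 format consumes k in groups of 4, so k must be a
        // multiple of 4 when that format is requested.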
        if(args[0].get_shape().type() == shape::int8_type and (k % 4) != 0 and int8_x4_format)
        {
            MIGRAPHX_THROW("ROCBLAS_GEMM: k size of int8 type input must be mutlple of 4!");
        }

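        // Fold all leading (batch) dimensions into a single matrix count.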
        auto num_matrices = std::accumulate(
            out_lens.rbegin() + 2, out_lens.rend(), std::size_t{1}, std::multiplies<std::size_t>());
        if(num_matrices == 1)
        {
            // the rocblas_gemm API handles inputs and output matrices as
            // column-major format. When doing a C = A * B, we actually do
            // C^T = (B^T) * (A^T). That is the reason we input args[1] as
            // A and args[0] as B in calling the rocblas_gemm.
            rocblas_invoke(&rocblas_gemm_ex,
                           ctx.get_stream().get_rocblas(),
                           transb ? rocblas_operation_transpose : rocblas_operation_none,
                           transa ? rocblas_operation_transpose : rocblas_operation_none,
                           n,
                           m,
                           k,
                           alpha_v,
                           to_pointer(args.at(1)),
                           arg_type,
                           ldb,
                           to_pointer(args.at(0)),
                           arg_type,
                           lda,
                           beta_v,
                           to_pointer(args[2]),
                           output_type,
                           ldc,
                           is_3inputs ? to_pointer(args[3]) : to_pointer(args[2]),
                           output_type,
                           ldc,
                           compute_type,
                           rocblas_gemm_algo_standard,
                           0,
                           flag);
        }
        else
        {
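            // Batched case: same column-major argument swap as above, with the
            // per-argument batch strides passed to the strided-batched API.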
            auto a_stride = get_batch_stride(args[0]);
            auto b_stride = get_batch_stride(args[1]);
            auto c_stride = get_batch_stride(args[2]);
            rocblas_invoke(&rocblas_gemm_strided_batched_ex,
                           ctx.get_stream().get_rocblas(),
                           transb ? rocblas_operation_transpose : rocblas_operation_none,
                           transa ? rocblas_operation_transpose : rocblas_operation_none,
                           n,
                           m,
                           k,
                           alpha_v,
                           to_pointer(args.at(1)),
                           arg_type,
                           ldb,
                           b_stride,
                           to_pointer(args.at(0)),
                           arg_type,
                           lda,
                           a_stride,
                           beta_v,
                           to_pointer(args[2]),
                           output_type,
                           ldc,
                           c_stride,
                           is_3inputs ? to_pointer(args[3]) : to_pointer(args[2]),
                           output_type,
                           ldc,
                           c_stride,
                           num_matrices,
                           compute_type,
                           rocblas_gemm_algo_standard,
                           0,
                           flag);
        }
    });
}

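// Public entry points; both forward to the templated implementation above.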
void gemm(context& ctx,
          const shape& output_shape,
          const std::vector<argument>& args,
          float alpha,
          float beta,
          bool int8_x4_format,
          bool compute_fp32)
{
    gemm_impl(ctx, output_shape, args, alpha, beta, int8_x4_format, compute_fp32);
}

void gemm(context& ctx,
          const shape& output_shape,
          const std::vector<argument>& args,
          int32_t alpha,
          int32_t beta,
          bool int8_x4_format,
          bool compute_fp32)
{
    gemm_impl(ctx, output_shape, args, alpha, beta, int8_x4_format, compute_fp32);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx