#include "Linear.h"
#include "kernels/zgemm/zgemm.h"
#include "kernels/gemm_f16.h"
#include "kernels/misc_kernels.h"
#include "kernels/awq/gemv_awq.h"
#include "kernels/dwconv.h"

#include <nvtx3/nvToolsExt.h>

using namespace nunchaku;

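// GEMM_F16: plain half/bfloat16 linear layer. The weight is stored as
// [out_features, in_features]; forward() dispatches directly to the gemm_f16
// kernel with an optional bias.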
GEMM_F16::GEMM_F16(int in_features, int out_features, bool use_bias, Tensor::ScalarType dtype, Device device) :
    in_features(in_features), out_features(out_features)
{
    this->weight = Tensor::allocate({out_features, in_features}, dtype, device);
    this->bias = use_bias ? Tensor::allocate({out_features}, dtype, device) : Tensor{};

    registerParams
        (weight, "weight")
        (bias, "bias")
    ;
}

Tensor GEMM_F16::forward(Tensor x) {
    Tensor out = gemm_f16(x, this->weight, {}, this->bias, 1.0f);
    return out;
}

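// GEMV_AWQ: 4-bit weight-only (AWQ) linear layer with group size 64 and an
// optional unfused LoRA branch evaluated in half precision.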
GEMV_AWQ::GEMV_AWQ(int in_features, int out_features, bool use_bias, Tensor::ScalarType dtype, Device device) : 
    in_features(in_features), out_features(out_features), group_size(64), lora_rank(0), lora_scale(1.0f)
{
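    // AWQ packs eight 4-bit weights into each int32 of qweight; wscales and
    // wzeros hold one scale / zero point per group of `group_size` input
    // channels for every output channel.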
    this->qweight = Tensor::allocate({out_features / 4, ceilDiv(in_features, 8) * 4}, Tensor::INT32, device);
    this->wscales = Tensor::allocate({ceilDiv(in_features, group_size), out_features}, dtype, device);
    this->wzeros  = Tensor::allocate({ceilDiv(in_features, group_size), out_features}, dtype, device);
    this->bias = use_bias ? Tensor::allocate({out_features}, dtype, device) : Tensor{};

    // !!! lora layout is different from w4a4 !!!
    this->lora_down = Tensor::allocate({lora_rank, in_features}, dtype, device, true);
    this->lora_up = Tensor::allocate({out_features, lora_rank}, dtype, device, true);

    registerParams
        (qweight, "qweight")
        (wscales, "wscales")
        (wzeros, "wzeros")
        (bias, "bias")
        (lora_down, "lora_down", ParamFlags::Optional)
        (lora_up, "lora_up", ParamFlags::Optional)
    ;
}

void GEMV_AWQ::loadParam(std::string key, Tensor &dst, Tensor src) {
    if (key == "lora_down" || key == "lora_up") {
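        // The incoming LoRA weights may have a different rank than the
        // zero-rank placeholders allocated in the constructor; re-allocate
        // the destination tensor and pick up the new rank in that case.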
        assert(src.ndims() == 2);
        if (dst.shape.dataExtent != src.shape.dataExtent) {
            dst = src.copy(this->qweight.device());
            if (key == "lora_down") {
                const int new_rank = dst.shape[0];
                this->lora_rank = new_rank;
            }
        } else {
            dst.copy_(src);
        }
    } else {
        Module::loadParam(key, dst, src);
    }
}

Tensor GEMV_AWQ::forward(Tensor x) {
    debug("x", x);

    const int M = (int)x.numel() / x.shape[-1];
    Tensor out = gemv_awq(x, this->qweight, this->wscales, this->wzeros, M, out_features, in_features, group_size);
    if (bias.valid()) {
        // TODO: batch
        assert(out.numel() == bias.numel());
        out = kernels::add(out, bias.view(out.shape.dataExtent));
    }

    debug("out_before_lora", out);

    if (this->lora_rank > 0) {
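        // Unfused LoRA branch: two dense FP16 GEMMs (down- then up-projection,
        // the latter scaled by lora_scale) added onto the AWQ output.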
        Tensor lora_act = gemm_f16(x, this->lora_down, {}, {}, 1.0f);
        debug("lora_act", lora_act);

        Tensor lora_out = gemm_f16(lora_act, this->lora_up, {}, {}, this->lora_scale);
        debug("lora_out", lora_out);

        out = kernels::add(out, lora_out);
    }

    debug("out", out);
    
    return out;
}


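// When set to 1, the fused LoRA path inside the W4A4 kernels is bypassed and
// the LoRA projections run as separate cuBLAS HGEMMs (reference / debug path).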
#define NO_LORA_FUSION 0

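// GEMM_W4A4: 4-bit-weight / 4-bit-activation linear layer. Activations are
// quantized on the fly (together with the smoothing factor and the fused LoRA
// down-projection); the GEMM epilogue applies the LoRA up-projection, bias,
// and optional fused ops such as SiLU or quantization for the next layer.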
GEMM_W4A4::GEMM_W4A4(int in_features, int out_features, bool bias, Tensor::ScalarType dtype, Device device) : 
    in_features(in_features), out_features(out_features), 
    in_features_pad(ceilDiv(in_features, 128) * 128), out_features_pad(ceilDiv(out_features, 128) * 128),
    lora_rank(0), dtype(dtype)
{
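    // Channel counts are padded to multiples of 128. Two 4-bit weights are
    // packed per int8 byte, with one weight scale per 64 input channels.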
    this->qweight = Tensor::allocate({out_features_pad, in_features_pad / 2}, Tensor::INT8, device, true);
    this->wscales = Tensor::allocate({in_features_pad / 64, out_features_pad}, dtype, device, true);

    this->bias = bias ? Tensor::allocate({out_features_pad}, dtype, device, true) : Tensor{};

    this->lora_down = Tensor::allocate({in_features_pad, lora_rank}, dtype, device, true);
    this->lora_up = Tensor::allocate({out_features_pad, lora_rank}, dtype, device, true);

    // TODO: smooth factor in FC1+FC2 fusion
    // TODO: smooth factor in non-Lora fusion
    this->smooth = Tensor::allocate({in_features_pad}, dtype, device, true);

    registerParams
        (qweight, "qweight")
        (wscales, "wscales")
        (this->bias, "bias")
        (lora_down, "lora_down", ParamFlags::Optional)
        (lora_up, "lora_up", ParamFlags::Optional)
        (smooth, "smooth")
    ;

#if NO_LORA_FUSION
    checkCUBLAS(cublasCreate(&handle));
#endif
}

void GEMM_W4A4::loadParam(std::string key, Tensor &dst, Tensor src) {
    if (key == "lora_down" || key == "lora_up") {
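        // Same dynamic-rank handling as GEMV_AWQ: re-allocate on a rank
        // mismatch, then keep one LoRA scale per group of 16 ranks.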
        assert(src.ndims() == 2);
        if (dst.shape.dataExtent != src.shape.dataExtent) {
            dst = src.copy(this->qweight.device());
            this->lora_rank = dst.shape[1];
            this->lora_scales.resize(ceilDiv(this->lora_rank, 16), 1.0f);
        } else {
            dst.copy_(src);
        }
    } else {
        Module::loadParam(key, dst, src);
    }
}

Tensor GEMM_W4A4::forward(Tensor x) {
    return std::get<Tensor>(this->forward(x, FuseOptions::EMPTY, nullptr));
}

Tensor GEMM_W4A4::forward_silu(Tensor x) {
    return std::get<Tensor>(this->forward(x, FuseOptions::SILU, nullptr));
}

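// Quantize the activation (which also produces the LoRA down-projection
// activations), then run the W4A4 GEMM; `fuse` selects what the epilogue
// produces (a dense output, a SiLU-activated output, or a quantized
// activation for `nextGEMM`).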
std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward(Tensor x, FuseOptions fuse, GEMM_W4A4 *nextGEMM) {
    return forward_quant(quantize(x, false), fuse, nextGEMM);
}

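// Forward variant that writes into a caller-provided `out`: the kernel
// epilogue applies the supplied q/k norm weights and rotary embedding and
// fills the optional pooling buffer in a single pass.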
void GEMM_W4A4::forward(Tensor x, Tensor out, Tensor pool, Tensor norm_q, Tensor norm_k, Tensor rotary_emb) {
    QuantizedActivation qact = quantize(x, false);

#if !NO_LORA_FUSION

#if 0
    Tensor dummy = Tensor::empty_like(qact.lora_act);
    dummy.zero_();

    gemm_w4a4(qact.act, qweight, out, {}, qact.ascales, wscales, {}, pool, dummy, this->lora_up, {}, {}, norm_q, norm_k, rotary_emb, this->bias, {}, qact.is_unsigned);
    debug("gemm.nolora.out", out);
#endif

    kernels::gemm_w4a4(qact.act, qweight, out, {}, qact.ascales, wscales, {}, pool, qact.lora_act, this->lora_up, {}, {}, norm_q, norm_k, rotary_emb, this->bias, {}, {}, {}, qact.is_unsigned, this->lora_scales, false);

    debug("gemm.out", out);
#else
    const int M = (int)qact.act.numel() / qact.act.shape[-1];

    kernels::gemm_w4a4(qact.act, qweight, out, {}, qact.ascales, wscales, {}, pool, {}, {}, {}, {}, norm_q, norm_k, rotary_emb, this->bias, {}, qact.is_unsigned, this->lora_scales);

    nvtxRangePushA("LoraUp");

    static const half one = 1.0;
    static const half zero = 0.0;
    // lora_up: [M, R] * [OC, R]^T => [M, OC]
    // cublas view: [R, OC]^T * [R, M] => [OC, M]
    checkCUBLAS(cublasHgemm(
        handle, 
        CUBLAS_OP_T, CUBLAS_OP_N, 
        this->out_features, M, this->lora_rank,
        &one,
        this->lora_up.data_ptr<half>(),
        this->lora_rank,
        qact.lora_act.data_ptr<half>(),
        this->lora_rank,
        &one, 
        out.data_ptr<half>(),
        this->out_features));

    nvtxRangePop();
#endif
}

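// Core W4A4 path. With EMPTY or SILU the kernel writes a dense output tensor;
// otherwise (e.g. GELU_QUANT) it emits a quantized activation for `nextGEMM`,
// fusing that layer's LoRA down-projection and smoothing factor into the
// epilogue.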
std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(QuantizedActivation qact, FuseOptions fuse, GEMM_W4A4 *nextGEMM) {
    Tensor out;
    QuantizedActivation qout;

    Tensor next_lora;
    Tensor next_smooth;

    const int M = (int)qact.act.numel() / qact.act.shape[-1];

    if (fuse == FuseOptions::EMPTY || fuse == FuseOptions::SILU) {
        // auto shape = TensorShape(qact.act.shape.dataExtent);
        // shape[-1] = out_features;
        auto shape = TensorShape(qact.actShape.dataExtent);
        shape[-1] = out_features;
        out = Tensor::allocate(shape, dtype, qweight.device());
    } else {
        qout.act = Tensor::allocate({M, out_features_pad / 2}, Tensor::INT8, qweight.device());
        qout.ascales = Tensor::allocate({out_features_pad / 64, M}, dtype, qweight.device());
        qout.lora_act = Tensor::allocate({M, lora_rank}, Tensor::FP32, qweight.device());
        qout.is_unsigned = true;
        qout.actShape = qact.actShape;

        next_lora = nextGEMM->lora_down;
        next_smooth = nextGEMM->smooth;
    }

#if !NO_LORA_FUSION

#if 0
    Tensor dummy = Tensor::empty_like(qact.lora_act);
    dummy.zero_();

    gemm_w4a4(qact.act, qweight, out, qout.act, qact.ascales, wscales, qout.ascales, {}, dummy, this->lora_up, next_lora, qout.lora_act, {}, {}, {}, this->bias, next_smooth, qact.is_unsigned);

    if (fuse == FuseOptions::EMPTY) {
        debug("gemm.nolora.out", out);
    } else {
        debug("gemm.nolora.qout", qout.act);
        debug("gemm.nolora.oscales", qout.ascales);
        debug("gemm.nolora.lora_act_out", qout.lora_act);
    }
#endif

    kernels::gemm_w4a4(qact.act, qweight, out, qout.act, qact.ascales, wscales, qout.ascales, {}, qact.lora_act, this->lora_up, next_lora, qout.lora_act, {}, {}, {}, this->bias, next_smooth, {}, {}, qact.is_unsigned, this->lora_scales, fuse == FuseOptions::SILU);

    if (fuse == FuseOptions::EMPTY || fuse == FuseOptions::SILU) {
        debug("gemm.out", out);
    } else {
        debug("gemm.qout", qout.act);
        debug("gemm.oscales", qout.ascales);
        debug("gemm.lora_act_out", qout.lora_act);
    }

    
#else
    if (!out.valid()) {
        auto shape = TensorShape(qact.act.shape.dataExtent);
        shape[-1] = out_features;
        out = Tensor::allocate(shape, Tensor::FP16, qweight.device());
    }

    kernels::gemm_w4a4(qact.act, qweight, out, qout.act, qact.ascales, wscales, qout.ascales, {}, {}, {}, {}, {}, {}, {}, {}, this->bias, next_smooth, qact.is_unsigned, this->lora_scales);

    nvtxRangePushA("LoraUp");

    static const half one = 1.0;
    static const half zero = 0.0;

    // lora_up: [M, R] * [OC, R]^T => [M, OC]
    // cublas view: [R, OC]^T * [R, M] => [OC, M]
    // lora_up layout wrong?
    checkCUBLAS(cublasHgemm(
        handle, 
        CUBLAS_OP_T, CUBLAS_OP_N, 
        this->out_features, M, this->lora_rank,
        &one,
        this->lora_up.data_ptr<half>(),
        this->lora_rank,
        qact.lora_act.data_ptr<half>(),
        this->lora_rank,
        &one, 
        out.data_ptr<half>(),
        this->out_features));

    nvtxRangePop();

    if (fuse == FuseOptions::GELU_QUANT) {
        nvtxRangePushA("LoraDown");
        // IC is for next lora (OC of this layer)
        // lora_down: [M, IC] * [IC, R] => [M, R]
        // cublas view: [R, IC] * [IC, M] => [R, M]
        checkCUBLAS(cublasHgemm(
            handle, 
            CUBLAS_OP_N, CUBLAS_OP_N, 
            this->lora_rank, M, this->out_features,
            &one,
            next_lora.data_ptr<half>(),
            this->lora_rank,
            out.data_ptr<half>(),
            this->out_features,
            &zero, 
            qout.lora_act.data_ptr<half>(),
            this->lora_rank));

        out = {};

        nvtxRangePop();
    }

#endif
    if (out.valid()) {
        return out;
    }
    return qout;
}

Tensor GEMM_W4A4::forward_quant(QuantizedActivation qact) {
    return std::get<Tensor>(this->forward_quant(qact, FuseOptions::EMPTY, nullptr));
}

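// Quantizes an activation tensor for the W4A4 GEMM: the token count is padded
// up to a multiple of 256, activations are packed two 4-bit values per byte
// with one scale per 64 input channels, and the kernel also applies the
// smoothing factor, computes the LoRA down-projection, and can fuse a GLU.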
GEMM_W4A4::QuantizedActivation GEMM_W4A4::quantize(Tensor x, bool fuse_glu) {
    const int actualM = x.numel() / x.shape[-1];
    const int M = ceilDiv(actualM, 256) * 256;

    // auto shape = TensorShape(x.shape.dataExtent);
    // shape[-1] = in_features / 2;

    QuantizedActivation qact;
    qact.act = Tensor::allocate({M, in_features_pad / 2}, Tensor::INT8, qweight.device());
    qact.ascales = Tensor::allocate({in_features_pad / 64, M}, dtype, qweight.device());
    qact.lora_act = Tensor::allocate({M, lora_rank}, Tensor::FP32, qweight.device());
    qact.is_unsigned = false;
    qact.actShape = x.shape.dataExtent;

#if !NO_LORA_FUSION
    debug("quantize.x", x);
    debug("quantize.smooth", this->smooth);

    kernels::quantize_w4a4_act_fuse_lora(x, qact.act, qact.ascales, this->lora_down, qact.lora_act, this->smooth, fuse_glu);

    debug("quantize.qact", qact.act);
    debug("quantize.ascales", qact.ascales);
    debug("quantize.lora_act", qact.lora_act);
#else 
    static const half one = 1.0;
    static const half zero = 0.0;

    nvtxRangePushA("LoraDown");

    // lora_down: [M, IC] * [IC, R] => [M, R]
    // cublas view: [R, IC] * [IC, M]
    checkCUBLAS(cublasHgemm(
        handle, 
        CUBLAS_OP_N, CUBLAS_OP_N, 
        this->lora_rank, M, this->in_features,
        &one,
        lora_down.data_ptr<half>(),
        this->lora_rank,
        x.data_ptr<half>(),
        this->in_features,
        &zero, 
        qact.lora_act.data_ptr<half>(),
        this->lora_rank));

    nvtxRangePop();

    kernels::quantize_w4a4_act(x, qact.act, qact.ascales);

#endif

    return qact;
}

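// GEMM_W8A8: 8-bit weight / 8-bit activation linear layer with one weight
// scale per output channel and one activation scale per token; quantization
// can optionally fuse a GLU on the input.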
GEMM_W8A8::GEMM_W8A8(int in_features, int out_features, bool bias, Tensor::ScalarType dtype, Device device) : 
    in_features(in_features), out_features(out_features), dtype(dtype)
{
    this->qweight = Tensor::allocate({out_features, in_features}, Tensor::INT8, device);
    this->wscales = Tensor::allocate({out_features}, dtype, device);
    this->bias = bias ? Tensor::allocate({out_features}, dtype, device, true) : Tensor{};

    registerParams
        (qweight, "qweight")
        (wscales, "wscales")
        (this->bias, "bias")
    ;
}

GEMM_W8A8::QuantizedActivation GEMM_W8A8::quantize(Tensor x, bool fuse_glu) {
    QuantizedActivation qact;
    auto qshape = x.shape;
    if (fuse_glu) {
        qshape[-1] /= 2;
    }
    qact.act = Tensor::allocate(qshape, Tensor::INT8, x.device());
    qact.ascales = Tensor::allocate({(int)x.numel() / x.shape[-1]}, this->dtype, x.device());

    debug("quantize.x", x);

    kernels::quantize_w8a8_act(x, qact.act, qact.ascales, fuse_glu);

    debug("quantize.qact", qact.act);
    debug("quantize.ascales", qact.ascales);

    return qact;
}

Tensor GEMM_W8A8::forward_quant(QuantizedActivation qact) {
    auto oshape = qact.act.shape;
    oshape[-1] = out_features;
    Tensor out = Tensor::allocate(oshape, this->dtype, qact.act.device());
    kernels::gemm_w8a8(qact.act, this->qweight, out, qact.ascales, this->wscales, this->bias);

    debug("gemm.out", out);
    return out;
}

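// DWCONV: 3x3 depthwise convolution; the weight is stored per channel as
// [in_features, 3, 3, 1] and forward() dispatches to the dwconv_f16 kernel.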
DWCONV::DWCONV(int in_features, bool use_bias, Tensor::ScalarType dtype, Device device) : 
    in_features(in_features)
{
    this->weight = Tensor::allocate({in_features, 3, 3, 1}, dtype, device);
    this->bias = use_bias ? Tensor::allocate({in_features}, dtype, device) : Tensor{};

    registerParams
        (this->weight, "weight")
        (this->bias, "bias")
    ;
}

Tensor DWCONV::forward(Tensor x) {
    return dwconv_f16(x, this->weight, {}, this->bias);
}