import torch.nn as nn
import awq_ext
import torch.nn.functional as F
from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV


class QuantFusedMLP(nn.Module):
    def __init__(
        self,
        gate_proj,
        down_proj,
        up_proj,
        activation=F.silu,
    ):
        super().__init__()

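        # Quantized weights, scales, and zero points of the gate/up projections are
        # registered as buffers so they move with .to(device) and are included in the
        # state dict; the down projection stays a WQLinear module and is called directly.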
        self.register_buffer('gate_proj_qweight', gate_proj.qweight)
        self.register_buffer('gate_proj_scales', gate_proj.scales)
        self.register_buffer('gate_proj_qzeros', gate_proj.qzeros)
        self.register_buffer('up_proj_qweight', up_proj.qweight)
        self.register_buffer('up_proj_scales', up_proj.scales)
        self.register_buffer('up_proj_qzeros', up_proj.qzeros)

        self.in_features = gate_proj.in_features
        self.intermediate_size = gate_proj.out_features
        self.out_features = down_proj.out_features
        self.w_bit = gate_proj.w_bit
        self.down_proj = down_proj

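        # Select the fused kernel matching the quantized linear layout: the GEMV kernel
        # takes the layer's group size as its last argument, while the GEMM kernel takes
        # a fixed 8 (its split-k iteration count), stored here under the same attribute.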
        if isinstance(down_proj, WQLinear_GEMV):
            self.linear = awq_ext.gemv_forward_cuda
            self.group_size = down_proj.group_size
        else:
            self.linear = awq_ext.gemm_forward_cuda
            self.group_size = 8

        self.activation = activation

    def forward(self, x, routing_weights=None):
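        # Flatten leading (batch, seq) dims to 2D for the quantized kernels;
        # out_shape restores them, with the intermediate size, after gating.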
        out_shape = x.shape[:-1] + (self.intermediate_size,)
        x = x.reshape(-1, x.shape[-1])
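        # Fused SwiGLU-style MLP: activation(gate_proj(x)) * up_proj(x), then down_proj.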
        gate_output = self.linear(
            x,
            self.gate_proj_qweight,
            self.gate_proj_scales,
            self.gate_proj_qzeros,
            self.group_size,
        )
        up_output = self.linear(
            x,
            self.up_proj_qweight,
            self.up_proj_scales,
            self.up_proj_qzeros,
            self.group_size,
        )
        x = self.activation(gate_output) * up_output
        x = x.reshape(out_shape)
        x = self.down_proj(x)

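        # Optional per-token scaling, e.g. when this MLP is used as an MoE expert.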
        if routing_weights is not None:
            x = routing_weights * x

        return x


class QuantLlamaMLP(QuantFusedMLP):
    r"""
    QuantLlamaMLP class kept for backward compatibility; in the future, users
    should always use the `QuantFusedMLP` class instead.
    """
    def __init__(
        self,
        gate_proj,
        down_proj,
        up_proj
    ):
        super().__init__(gate_proj, down_proj, up_proj)
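

# Usage sketch (illustrative, not part of the module's API): fuse the gate/up/down
# projections of a quantized Llama-style decoder layer. Assumes `layer.mlp` exposes
# WQLinear gate_proj, down_proj, and up_proj attributes, as produced by AWQ quantization.
#
#   layer.mlp = QuantFusedMLP(
#       gate_proj=layer.mlp.gate_proj,
#       down_proj=layer.mlp.down_proj,
#       up_proj=layer.mlp.up_proj,
#   )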