import torch
import torch.nn as nn

try:
    import awq_ext  # with CUDA kernels
    AWQ_INSTALLED = True
except Exception:
    AWQ_INSTALLED = False


def make_divisible(c, divisor):
    # ceiling division: smallest number of `divisor`-sized blocks that covers `c`
    return (c + divisor - 1) // divisor

def calculate_zeros_width(in_features, group_size=128, pack_num=8):
    # width, in int32 words, of the packed zero-points row for the given input size
    if group_size >= 128:
        size_multiplier = 1
    elif group_size == 64:
        size_multiplier = 2
    elif group_size == 32:
        size_multiplier = 4
    else:
        raise NotImplementedError(f"Unsupported group_size: {group_size}")
    base_width = make_divisible(in_features // group_size, pack_num)
    base_width = make_divisible(base_width, size_multiplier) * size_multiplier
    return base_width
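# Worked example (illustrative, not part of the upstream module): for
# in_features=4096, group_size=128, pack_num=8 the function computes
#   make_divisible(4096 // 128, 8) = make_divisible(32, 8) = 4
# and size_multiplier=1, so each output row stores its zero points in 4 int32 words.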

class WQLinear_GEMV(nn.Module):
    def __init__(self, w_bit, group_size, in_features, out_features, bias, dev):
        super().__init__()
        if w_bit not in [4]:
            raise NotImplementedError("Only 4-bit is supported for now.")

        self.in_features = in_features
        self.out_features = out_features
        self.w_bit = w_bit
        self.group_size = group_size if group_size != -1 else in_features
        self.split_k_iters = 8

        # quick sanity check (make sure alignment constraints hold)
        assert self.in_features % self.group_size == 0
        assert out_features % (32 // self.w_bit) == 0
        pack_num = 32 // self.w_bit  # number of quantized values packed into one int32

        # packed quantized weights, shape (out_features, in_features // pack_num)
        self.register_buffer(
            "qweight",
            torch.zeros(
                (out_features, in_features // pack_num), dtype=torch.int32, device=dev
            ),
        )
        # packed zero points, shape (out_features, calculate_zeros_width(in_features, group_size))
        self.register_buffer(
            "qzeros",
            torch.zeros(
                (out_features, calculate_zeros_width(in_features, self.group_size)),
                dtype=torch.int32,
                device=dev,
            ),
        )
        # per-group FP16 scales, padded to pack_num times the packed zeros width
        self.register_buffer(
            "scales",
            torch.zeros(
                (
                    out_features,
                    calculate_zeros_width(in_features, self.group_size) * pack_num,
                ),
                dtype=torch.float16,
                device=dev,
            ),
        )
        if bias:
            self.register_buffer(
                "bias", torch.zeros((out_features), dtype=torch.float16, device=dev)
            )
        else:
            self.bias = None

    @classmethod
    def from_linear(
        cls, linear, w_bit, group_size, init_only=False, scales=None, zeros=None
    ):
        awq_linear = cls(
            w_bit,
            group_size,
            linear.in_features,
            linear.out_features,
            linear.bias is not None,
            linear.weight.device,
        )
        if init_only:  # just prepare for loading a state dict
            return awq_linear

        # need scales and zeros info for real quantization
        assert scales is not None and zeros is not None
        # offsets so that round((w + zero * scale) / scale) reproduces the quantized integer
        scale_zeros = zeros * scales

        pack_num = 32 // awq_linear.w_bit
        qscales = torch.zeros(
            (
                scales.shape[0],
                calculate_zeros_width(linear.in_features, group_size) * pack_num,
            ),
            dtype=torch.float16,
            device=scales.device,
        )
        qscales[:, : scales.shape[1]] = scales
        awq_linear.scales = qscales
        if linear.bias is not None:
            awq_linear.bias = linear.bias.clone().half()

        # quantize the FP16 weights column by column into per-group integers
        intweight = []
        for idx in range(awq_linear.in_features):
            intweight.append(
                torch.round(
                    (linear.weight.data[:, idx] + scale_zeros[:, idx // group_size])
                    / awq_linear.scales[:, idx // group_size]
                ).to(torch.int)[:, None]
            )
        intweight = torch.cat(intweight, dim=1)
        intweight = intweight.to(dtype=torch.int32)
        # allocate the packed weight buffer: in_features // pack_num int32 columns per output row
        qweight = torch.zeros(
            (intweight.shape[0], intweight.shape[1] // 32 * awq_linear.w_bit),
            dtype=torch.int32,
            device=intweight.device,
        )
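        # Packing sketch (illustrative): with w_bit=4 and the identity order_map, eight
        # consecutive integers q0..q7 end up in one int32 as
        #   q0 | q1 << 4 | q2 << 8 | ... | q7 << 28,
        # e.g. [1, 2, 3, 4, 5, 6, 7, 8] packs to 0x87654321.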

        for col in range(intweight.shape[1] // pack_num):
            if awq_linear.w_bit == 4:
                order_map = [0, 1, 2, 3, 4, 5, 6, 7]
            else:
                raise NotImplementedError("Only 4-bit is supported for now.")
            for i in range(pack_num):
                qweight_col = intweight[:, col * pack_num + order_map[i]]
                qweight[:, col] |= qweight_col << (i * awq_linear.w_bit)
        awq_linear.qweight = qweight

        zeros = zeros.to(dtype=torch.int32)
        qzeros = torch.zeros(
            (zeros.shape[0], calculate_zeros_width(linear.in_features, group_size)),
            dtype=torch.int32,
            device=zeros.device,
        )
        # pack zero points the same way, skipping padding columns beyond zeros.shape[1]
        for col in range((zeros.shape[1] + pack_num - 1) // pack_num):
            if awq_linear.w_bit == 4:
                order_map = [0, 1, 2, 3, 4, 5, 6, 7]
            else:
                raise NotImplementedError("Only 4-bit is supported for now.")
            for i in range(pack_num):
                if col * pack_num + order_map[i] >= zeros.shape[1]:
                    continue
                qzero_col = zeros[:, col * pack_num + order_map[i]]
                qzeros[:, col] |= qzero_col << (i * awq_linear.w_bit)
        awq_linear.qzeros = qzeros
        return awq_linear

    @torch.no_grad()
    def forward(self, x):
        assert AWQ_INSTALLED, (
            "AWQ kernels could not be loaded. "
            "Please install them from https://github.com/casper-hansen/AutoAWQ_kernels"
        )

        out_shape = x.shape[:-1] + (self.out_features,)
        inputs = x.reshape(-1, x.shape[-1])

        # the CUDA kernels operate on FP16; cast and remember the original dtype
        input_dtype = inputs.dtype
        if input_dtype != torch.float16:
            inputs = inputs.half()

        # inputs with more than 8 rows use the GEMM-style kernel; smaller batches use the GEMV kernel
        if inputs.shape[0] > 8:
            out = awq_ext.gemmv2_forward_cuda(
                inputs,
                self.qweight,
                self.scales,
                self.qzeros,
                self.group_size,
                self.split_k_iters,
            )
        else:
            out = awq_ext.gemv_forward_cuda(
                inputs, self.qweight, self.scales, self.qzeros, self.group_size
            )

        if input_dtype != torch.float16:
            out = out.to(dtype=input_dtype)

        out = out + self.bias if self.bias is not None else out
        return out.reshape(out_shape)

    def extra_repr(self) -> str:
        return (
            "in_features={}, out_features={}, bias={}, w_bit={}, group_size={}".format(
                self.in_features,
                self.out_features,
                self.bias is not None,
                self.w_bit,
                self.group_size,
            )
        )
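# --- Usage sketch (illustrative, not part of the upstream module) ---
# A minimal example of converting an FP16 nn.Linear into a WQLinear_GEMV layer.
# The constant scales/zeros below are dummies chosen so the 4-bit range is not
# exceeded; in practice they come from an AWQ calibration pass.
if __name__ == "__main__":
    if AWQ_INSTALLED and torch.cuda.is_available():
        in_features, out_features, group_size = 4096, 4096, 128
        linear = nn.Linear(in_features, out_features, bias=True).half().to("cuda")

        n_groups = in_features // group_size
        scales = torch.full((out_features, n_groups), 0.02, dtype=torch.float16, device="cuda")
        zeros = torch.full((out_features, n_groups), 8.0, dtype=torch.float16, device="cuda")

        q_linear = WQLinear_GEMV.from_linear(
            linear, w_bit=4, group_size=group_size, scales=scales, zeros=zeros
        )
        x = torch.randn(2, in_features, dtype=torch.float16, device="cuda")
        print(q_linear(x).shape)  # torch.Size([2, 4096])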