import torch
import torch.nn as nn

try:
    import awq_ext  # with CUDA kernels
    AWQ_INSTALLED = True
except Exception:  # awq_ext is optional; fall back gracefully when it cannot be imported
    AWQ_INSTALLED = False


def make_divisible(c, divisor):
    # Ceiling division: number of `divisor`-sized chunks needed to cover `c`.
    return (c + divisor - 1) // divisor


def calculate_zeros_width(in_features, group_size=128, pack_num=8):
    """Number of packed int32 words needed per output row to store the zero points."""
    if group_size >= 128:
        size_multiplier = 1
    elif group_size == 64:
        size_multiplier = 2
    elif group_size == 32:
        size_multiplier = 4
    else:
        raise NotImplementedError("Only group sizes of 32, 64, or >= 128 are supported.")

    base_width = make_divisible(in_features // group_size, pack_num)
    base_width = make_divisible(base_width, size_multiplier) * size_multiplier
    return base_width
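
# Illustrative example (values chosen for demonstration): with in_features=4096
# and group_size=128 there are 4096 // 128 = 32 groups; packing zero points at
# 8 per int32 word gives make_divisible(32, 8) = 4 words, and size_multiplier = 1
# keeps the width at 4, so qzeros gets 4 int32 columns and scales 4 * 8 = 32 columns.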


class WQLinear_GEMV(nn.Module):
    def __init__(self, w_bit, group_size, in_features, out_features, bias, dev):
        super().__init__()

        if w_bit not in [4]:
            raise NotImplementedError("Only 4-bit are supported for now.")

        self.in_features = in_features
        self.out_features = out_features
        self.w_bit = w_bit
        self.group_size = group_size if group_size != -1 else in_features
        self.split_k_iters = 8

        # quick sanity check: features must be aligned with the group size and pack width
        assert self.in_features % self.group_size == 0
        assert out_features % (32 // self.w_bit) == 0
        pack_num = 32 // self.w_bit
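        # Buffer layout (per output row): `qweight` packs `pack_num` 4-bit weights
        # into each int32 along the input dimension, `qzeros` packs the per-group
        # zero points the same way, and `scales` keeps one float16 scale per group,
        # padded so its width equals the qzeros width times `pack_num`.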

        self.register_buffer(
            "qweight",
            torch.zeros(
                (out_features, in_features // pack_num), dtype=torch.int32, device=dev
            ),
        )
        self.register_buffer(
            "qzeros",
            torch.zeros(
                (out_features, calculate_zeros_width(in_features, self.group_size)),
                dtype=torch.int32,
                device=dev,
            ),
        )
        self.register_buffer(
            "scales",
            torch.zeros(
                (
                    out_features,
                    calculate_zeros_width(in_features, self.group_size) * pack_num,
                ),
                dtype=torch.float16,
                device=dev,
            ),
        )
        if bias:
            self.register_buffer(
                "bias", torch.zeros((out_features), dtype=torch.float16, device=dev)
            )
        else:
            self.bias = None

    @classmethod
    def from_linear(
        cls, linear, w_bit, group_size, init_only=False, scales=None, zeros=None
    ):
        awq_linear = cls(
            w_bit,
            group_size,
            linear.in_features,
            linear.out_features,
            linear.bias is not None,
            linear.weight.device,
        )
        if init_only:  # just prepare the module for loading a state_dict
            return awq_linear

        # need scales and zeros info for real quantization
        assert scales is not None and zeros is not None
        scale_zeros = zeros * scales

        pack_num = 32 // awq_linear.w_bit
        qscales = torch.zeros(
            (
                scales.shape[0],
                calculate_zeros_width(linear.in_features, group_size) * pack_num,
            ),
            dtype=torch.float16,
            device=scales.device,
        )
        qscales[:, : scales.shape[1]] = scales
        awq_linear.scales = qscales
        if linear.bias is not None:
            awq_linear.bias = linear.bias.clone().half()

        # Quantize the fp16 weights column by column:
        # q = round((w + zero * scale) / scale), using each column's group parameters.
        intweight = []
        for idx in range(awq_linear.in_features):
            intweight.append(
                torch.round(
                    (linear.weight.data[:, idx] + scale_zeros[:, idx // group_size])
                    / awq_linear.scales[:, idx // group_size]
                ).to(torch.int)[:, None]
            )
        intweight = torch.cat(intweight, dim=1)
        intweight = intweight.to(dtype=torch.int32)
        # Allocate the packed weight buffer: 32 // w_bit (= 8) quantized values per int32.
        qweight = torch.zeros(
            (intweight.shape[0], intweight.shape[1] // 32 * awq_linear.w_bit),
            dtype=torch.int32,
            device=intweight.device,
        )

        # Pack pack_num consecutive 4-bit weights into each int32 column, with the
        # first element of the group in the lowest bits.
        for col in range(intweight.shape[1] // pack_num):
            if awq_linear.w_bit == 4:
                order_map = [0, 1, 2, 3, 4, 5, 6, 7]
            else:
                raise NotImplementedError("Only 4-bit are supported for now.")
            for i in range(pack_num):
                qweight_col = intweight[:, col * pack_num + order_map[i]]
                qweight[:, col] |= qweight_col << (i * awq_linear.w_bit)
        awq_linear.qweight = qweight

        zeros = zeros.to(dtype=torch.int32)
        qzeros = torch.zeros(
            (zeros.shape[0], calculate_zeros_width(linear.in_features, group_size)),
            dtype=torch.int32,
            device=zeros.device,
        )

        # Pack the per-group zero points with the same layout; columns beyond
        # zeros.shape[1] are left as zero padding.
        for col in range((zeros.shape[1] + pack_num - 1) // pack_num):
            if awq_linear.w_bit == 4:
                order_map = [0, 1, 2, 3, 4, 5, 6, 7]
            else:
                raise NotImplementedError("Only 4-bit are supported for now.")
            for i in range(pack_num):
                if col * pack_num + order_map[i] >= zeros.shape[1]:
                    continue
                qzero_col = zeros[:, col * pack_num + order_map[i]]
                qzeros[:, col] |= qzero_col << (i * awq_linear.w_bit)
        awq_linear.qzeros = qzeros
        return awq_linear

    @torch.no_grad()
    def forward(self, x):
        assert AWQ_INSTALLED, (
            "awq_ext CUDA kernels are not installed; they are required for WQLinear_GEMV.forward."
        )

        out_shape = x.shape[:-1] + (self.out_features,)
        inputs = x.reshape(-1, x.shape[-1])

        input_dtype = inputs.dtype
        if input_dtype != torch.float16:
            inputs = inputs.half()

        # More than 8 rows in the flattened batch: dispatch to the batched GEMM
        # kernel; smaller batches use the GEMV kernel.
        if inputs.shape[0] > 8:
            out = awq_ext.gemmv2_forward_cuda(
                inputs,
                self.qweight,
                self.scales,
                self.qzeros,
                self.group_size,
                self.split_k_iters,
            )
        else:
            out = awq_ext.gemv_forward_cuda(
                inputs, self.qweight, self.scales, self.qzeros, self.group_size
            )

        if input_dtype != torch.float16:
            out = out.to(dtype=input_dtype)

        out = out + self.bias if self.bias is not None else out
        return out.reshape(out_shape)

    def extra_repr(self) -> str:
        return (
            "in_features={}, out_features={}, bias={}, w_bit={}, group_size={}".format(
                self.in_features,
                self.out_features,
                self.bias is not None,
                self.w_bit,
                self.group_size,
            )
        )
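

# --- Illustrative usage sketch (not part of the original module) --------------
# Everything below is an assumption-laden example: it presumes a CUDA device,
# a working awq_ext build, and uses random scales/zero points in place of the
# values a real AWQ quantization pass would produce.
if __name__ == "__main__" and AWQ_INSTALLED and torch.cuda.is_available():
    in_features, out_features, group_size = 4096, 4096, 128
    n_groups = in_features // group_size

    linear = nn.Linear(in_features, out_features, bias=True).half().cuda()
    # Dummy per-output-channel, per-group quantization parameters.
    scales = torch.rand(out_features, n_groups, device="cuda").half() * 0.01 + 1e-3
    zeros = torch.randint(0, 16, (out_features, n_groups), device="cuda").half()

    q_linear = WQLinear_GEMV.from_linear(
        linear, w_bit=4, group_size=group_size, scales=scales, zeros=zeros
    )
    x = torch.randn(1, in_features, dtype=torch.float16, device="cuda")
    print(q_linear(x).shape)  # expected: torch.Size([1, 4096])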