import torch
import logging
import functools
import torch.nn as nn
from tqdm import tqdm
from typing import Dict, List
from collections import defaultdict
from awq.utils.utils import clear_memory
from awq.utils.calib_data import get_calib_dataset
from awq.quantize.scale import apply_scale, apply_clip
from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV
from awq.utils.module import (
    append_str_prefix, get_op_name, get_named_linears, set_op_by_name
)


class AwqQuantizer:
    def __init__(self, awq_model, model, tokenizer, w_bit, group_size, version,
                 calib_data, split, text_column) -> None:
        self.awq_model = awq_model
        self.model = model
        self.tokenizer = tokenizer
        self.w_bit = w_bit
        self.group_size = group_size
        self.version = version
        self.calib_data = calib_data
        self.split = split
        self.text_column = text_column
        self.modules, self.module_kwargs, self.inps = self.init_quant()
    
    def pseudo_quantize_tensor(self, w: torch.Tensor, get_scale_zp=False):
        org_w_shape = w.shape
        if self.group_size > 0:
            assert org_w_shape[-1] % self.group_size == 0
            w = w.reshape(-1, self.group_size)
        assert w.dim() == 2

        # zero point quantization
        max_val = w.amax(dim=1, keepdim=True)
        min_val = w.amin(dim=1, keepdim=True)
        max_int = 2 ** self.w_bit - 1
        min_int = 0
        scales = (max_val - min_val).clamp(min=1e-5) / max_int
        zeros = (-torch.round(min_val / scales)).clamp_(min_int, max_int)
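        # Worked example (illustrative numbers, not from the original code):
        # with w_bit=4, max_int = 15; a group spanning [-0.30, 0.45] gets
        # scales = 0.75 / 15 = 0.05 and zeros = round(0.30 / 0.05) = 6, so
        # w = 0.12 quantizes to round(0.12 / 0.05) + 6 = 8 and dequantizes
        # to (8 - 6) * 0.05 = 0.10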

        assert torch.isnan(scales).sum() == 0
        assert torch.isnan(w).sum() == 0

        w = (torch.clamp(torch.round(w / scales) + zeros, min_int, max_int) - zeros) * scales
        assert torch.isnan(w).sum() == 0

        w = w.reshape(org_w_shape)

        if get_scale_zp:
            return w, scales.view(w.shape[0], -1), zeros.view(w.shape[0], -1)
        else:
            return w
    
    def quantize(self):
        for i in tqdm(range(len(self.modules)), desc="AWQ"):
            # [STEP 1]: Get layer, extract linear modules, extract input features
            self.modules[i] = self.modules[i].cuda()
            named_linears = get_named_linears(self.modules[i])
            input_feat = self._get_input_feat(self.modules[i], named_linears)
            clear_memory()

            # [STEP 2]: Compute and apply scale list
            module_config: List[Dict] = self.awq_model.get_layers_for_scaling(
                self.modules[i], input_feat, self.module_kwargs
            )
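            # each config dict maps onto _search_best_scale's parameters:
            # prev_op, layers, inp, and optionally module2inspect / kwargs
            # (inferred from the **layer unpacking below)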
            scales_list = [self._search_best_scale(self.modules[i], **layer) for layer in module_config]
            apply_scale(self.modules[i], scales_list, input_feat_dict=input_feat)
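            # prefix entries with the block's full name so they match
            # state_dict-style paths in the parent model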
            scales_list = append_str_prefix(scales_list, get_op_name(self.model, self.modules[i]) + ".")

            # [STEP 3]: Compute and apply clipping list
            clip_list = self._search_best_clip(self.modules[i], named_linears, input_feat)
            apply_clip(self.modules[i], clip_list)
            clip_list = append_str_prefix(clip_list, get_op_name(self.model, self.modules[i]) + ".")

            # [STEP 4]: Quantize weights
            self._apply_quant(self.modules[i], named_linears)
            clear_memory()
    
    def _apply_quant(self, module, named_linears: Dict[str, nn.Linear]):
        for name, linear_layer in named_linears.items():
            # NOTE: small regression in perplexity if linear layer uses .cpu().float()
            linear_layer = linear_layer.cuda().half()

            linear_layer.weight.data, scales, zeros = self.pseudo_quantize_tensor(
                linear_layer.weight.data, 
                get_scale_zp=True
            )

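            # NOTE (assumption): the GEMM kernel consumes scales/zeros
            # transposed to [in_features // group_size, out_features],
            # while GEMV keeps the [out_features, n_groups] layout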
            if self.version == 'GEMM':
                scales = scales.t().contiguous()
                zeros = zeros.t().contiguous()
                q_linear_module = WQLinear_GEMM

            elif self.version == 'GEMV':
                q_linear_module = WQLinear_GEMV
            else:
                raise ValueError(f"Unknown version: {self.version}")
            
            q_linear = q_linear_module.from_linear(
                linear=linear_layer,
                w_bit=self.w_bit,
                group_size=self.group_size,
                init_only=False,
                scales=scales,
                zeros=zeros
            )

            linear_layer.cpu()
            q_linear.to(next(module.parameters()).device)
            set_op_by_name(module, name, q_linear)
            clear_memory()

    @torch.no_grad()
    def _search_best_scale(self, module, prev_op, layers: List[nn.Linear],
                           inp: torch.Tensor, module2inspect=None, kwargs={}):
        if module2inspect is None:
            assert len(layers) == 1
            module2inspect = layers[0]
        
        if "use_cache" in kwargs:
            kwargs.pop("use_cache")
        
        # Put x on the right device
        inp = inp.to(next(module2inspect.parameters()).device)

        # [STEP 1]: Compute per-channel mean of the normalized weight magnitude
        weight = torch.cat([_m.weight for _m in layers], dim=0)
        org_shape = weight.shape
        if self.group_size > 0:
            weight = weight.view(-1, self.group_size)
        w_scale = weight.abs() / weight.abs().amax(dim=1, keepdim=True)
        w_scale = w_scale.view(org_shape)
        w_max = w_scale.mean(0)
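        # w_max is the per-input-channel mean of |W| normalized within each
        # quantization group: a proxy for how salient each channel's weights are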
        clear_memory(weight)

        # [STEP 2]: Compute per-channel mean of the absolute activations
        x_max = inp.abs().view(-1, inp.shape[-1]).mean(0)

        # [STEP 3]: Compute FP16 output of the module
        # (gradient tracking is already disabled by the @torch.no_grad() decorator)
        fp16_output = module2inspect(inp, **kwargs)
        if isinstance(fp16_output, tuple):
            fp16_output = fp16_output[0]
        
        # [STEP 4]: Compute loss
        best_scales = self._compute_best_scale(
            inp, w_max, x_max, module2inspect,
            layers, fp16_output, kwargs
        )
        
        return (
            get_op_name(module, prev_op),
            tuple(get_op_name(module, m) for m in layers),
            best_scales,
        )

    def _compute_best_scale(self, x, w_max, x_max, module2inspect,
                            linears2scale: List[nn.Linear], fp16_output, kwargs={}):
        """
        Compute loss and select best scales

Casper's avatar
Casper committed
157
        L(s) = || Q(W * s) (s^-1 * X) - W * X ||
Casper's avatar
Casper committed
158
159
160
161
162
163
164
165
166
167
168
        Q: weight quantization function | pseudo_quantize_tensor(W * s)
        X: inputs from calib dataset    | X
        W: original weights in FP16     | layer
        s: per channel scaling factor   | s^-1 * X
        """
        n_grid = 20
        history = []
        best_ratio = -1
        best_scales = None
        best_error = float('inf')

        org_sd = {k: v.cpu() for k, v in module2inspect.state_dict().items()}
        
        device = x.device
        x_max = x_max.view(-1).to(device)
        w_max = w_max.view(-1).to(device)
        
        for ratio in range(n_grid):
            # create new scales
            ratio = ratio / n_grid

            # NOTE: s^-1 * x is fused here, according to paper
            scales = (x_max.pow(ratio) / w_max.pow(1-ratio)).clamp(min=1e-4)
            scales = scales / (scales.max() * scales.min()).sqrt()
            scales_view = scales.view(1, -1).to(device)

            # Q(W * s)
            for fc in linears2scale:
                fc.weight.mul_(scales_view)
                fc.weight.data = self.pseudo_quantize_tensor(fc.weight.data) / scales_view
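                # the weights are now Q(W * s) / s, so module2inspect(x)
                # below evaluates Q(W * s) (s^-1 * X) without modifying x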

            # forward with quantized, scaled weights: Q(W * s) (s^-1 * X)
            int_w_output = module2inspect(x, **kwargs)
            if isinstance(int_w_output, tuple):
                int_w_output = int_w_output[0]
            
            # compute mean squared error
            loss = (fp16_output - int_w_output).float().pow(2).mean().item() # NOTE: float prevents overflow

            history.append(loss)
            if loss < best_error:
                best_error = loss
                best_ratio = ratio
                best_scales = scales.clone()
            module2inspect.load_state_dict(org_sd)

        if best_ratio == -1:
            logging.debug(history)
            raise Exception("No valid scaling ratio found during grid search")

        assert torch.isnan(best_scales).sum() == 0, best_scales

        return best_scales.detach().cpu()

    @torch.no_grad()
    def _search_best_clip(self, layer, named_linears, input_feat):
        clip_list = []
        avoid_clipping = ["q_", "k_", "query", "key", "Wqkv"]

        for name in named_linears:
            # due to the Q @ K^T matmul in attention, these projections are
            # hard to clip precisely, so skip them
            if any(_ in name for _ in avoid_clipping):
                continue

            named_linears[name].cuda()
            max_val = self._compute_best_clip(named_linears[name].weight, input_feat[name])
            clip_list.append((name, max_val))

            named_linears[name].cpu()
        
        return clip_list

    @torch.no_grad()
    def _compute_best_clip(self, w: torch.Tensor, input_feat: torch.Tensor,
                           n_grid=20, max_shrink=0.5, n_sample_token=512):
        assert w.dim() == 2
        org_w_shape = w.shape
        # w           [co, ci]      -> [co, 1, n_group, group size]
        # input_feat  [n_token, ci] -> [1, n_token, n_group, group size]
        group_size = self.group_size if self.group_size > 0 else w.shape[1]
        input_feat = input_feat.view(-1, input_feat.shape[-1])
        input_feat = input_feat.reshape(1, input_feat.shape[0], -1, group_size)
        # sample at most n_sample_token token positions (guard step >= 1)
        step = max(1, input_feat.shape[1] // n_sample_token)
        input_feat = input_feat[:, ::step]
        w = w.reshape(w.shape[0], 1, -1, group_size)

        oc_batch_size = 256 if w.shape[0] % 256 == 0 else 64  # prevent OOM
        assert w.shape[0] % oc_batch_size == 0
        w_all = w
        best_max_val_all = []

        for i_b in range(w.shape[0] // oc_batch_size):
            w = w_all[i_b * oc_batch_size: (i_b + 1) * oc_batch_size]

            org_max_val = w.abs().amax(dim=-1, keepdim=True)  # co, 1, n_group, 1

            best_max_val = org_max_val.clone()
            min_errs = torch.ones_like(org_max_val) * 1e9
            input_feat = input_feat.to(w.device)
            org_out = (input_feat * w).sum(dim=-1)  # co, n_token, n_group

            for i_s in range(int(max_shrink * n_grid)):
                max_val = org_max_val * (1 - i_s / n_grid)
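                # e.g. with n_grid=20, max_shrink=0.5: i_s runs 0..9, so the
                # clip threshold sweeps 100% down to 55% of the original
                # group-wise |w| maximum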
                min_val = - max_val
                cur_w = torch.clamp(w, min_val, max_val)
                q_w = self.pseudo_quantize_tensor(cur_w)
                cur_out = (input_feat * q_w).sum(dim=-1)

                # co, 1, n_group, 1
                err = (cur_out - org_out).pow(2).mean(dim=1).view(min_errs.shape)
                del cur_w
                del cur_out
                cur_best_idx = err < min_errs
                min_errs[cur_best_idx] = err[cur_best_idx]
                best_max_val[cur_best_idx] = max_val[cur_best_idx]
            best_max_val_all.append(best_max_val)

        best_max_val = torch.cat(best_max_val_all, dim=0)

        clear_memory(input_feat)
        clear_memory(org_out)

        return best_max_val.squeeze(1)

    def init_quant(self, n_samples=128, seqlen=512):
        modules = self.awq_model.get_model_layers(self.model)
        samples = get_calib_dataset(
            data=self.calib_data, tokenizer=self.tokenizer, n_samples=n_samples, block_size=seqlen,
            split=self.split, text_column=self.text_column
        )
        samples = torch.cat(samples, dim=0)
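        # with the defaults this is roughly n_samples * seqlen
        # = 128 * 512 = 65,536 calibration tokens in one batch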

        inps = []
        layer_kwargs = {}

        modules[0] = modules[0].cuda()
        self.awq_model.move_embed(self.model, "cuda")
        
        # get input and kwargs to layer 0
        # forward hooks with `with_kwargs=True` are only supported in PyTorch 2.0,
        # so use this Catcher module hack for now
        class Catcher(nn.Module):
            def __init__(self, module):
                super().__init__()
                self.module = module

            def forward(self, hijacked_inputs, **kwargs):
                inps.append(hijacked_inputs)
                layer_kwargs.update(kwargs)
                raise ValueError  # early exit to break later inference

        # patch layer 0 to catch input and kwargs
        modules[0] = Catcher(modules[0])
        try:
            self.model(samples.to(next(self.model.parameters()).device))
        except ValueError:  # work with early exit
            pass
        del samples
        modules[0] = modules[0].module  # restore
        inps = inps[0]

        modules[0] = modules[0].cpu()
        self.awq_model.move_embed(self.model, "cpu")
        
        clear_memory()

        return modules, layer_kwargs, inps
    
    def _get_input_feat(self, layer, named_linears):
        # first, capture the input features of all linear layers via forward hooks
        def cache_input_hook(m, x, y, name, feat_dict):
            x = x[0]
            x = x.detach().cpu()
            feat_dict[name].append(x)

        input_feat = defaultdict(list)
        handles = []
        for name in named_linears:
            handles.append(named_linears[name].register_forward_hook(
                functools.partial(cache_input_hook, name=name, feat_dict=input_feat)))
        self.inps = self.inps.to(next(layer.parameters()).device)  # in case multi-gpu
        # get output as next layer's input (this forward pass also fires the hooks)
        self.inps = layer(self.inps, **self.module_kwargs)[0]
        for h in handles:
            h.remove()
        # now solve for scaling and clipping
        input_feat = {k: torch.cat(v, dim=0) for k, v in input_feat.items()}
        
        return input_feat
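
# Illustrative usage (a sketch with hypothetical setup; the supported entry
# point is the model-level quantize() API in AutoAWQ, not this class directly):
#
#   quantizer = AwqQuantizer(
#       awq_model, model, tokenizer, w_bit=4, group_size=128,
#       version='GEMM', calib_data='pileval', split='train',
#       text_column='text',
#   )
#   quantizer.quantize()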