"tools/robustness_eval.py" did not exist on "440fd1744a2b8e1669ff596e7fa8b16241856eea"
base.py 21 KB
Newer Older
1
import os
Casper Hansen's avatar
Casper Hansen committed
2
import gc
3
import json
Casper Hansen's avatar
Casper Hansen committed
4
import torch
5
import transformers
Casper Hansen's avatar
Casper Hansen committed
6
import torch.nn as nn
7

Casper Hansen's avatar
Casper Hansen committed
8
from tqdm import tqdm
Casper's avatar
Casper committed
9
from typing import List, Union, Dict
10
from safetensors.torch import save_file
Casper's avatar
Casper committed
11
from typing_extensions import Doc, Annotated
12
from huggingface_hub import snapshot_download
13
from transformers.modeling_utils import shard_checkpoint
14

Casper's avatar
Casper committed
15
16
17
18
19
20
21
22
23
24
25
from awq.modules.linear import (
    WQLinear_GEMM,
    WQLinear_GEMV,
    WQLinear_Marlin,
    WQLinear_Exllama,
    WQLinear_ExllamaV2,
    WQLinear_GEMVFast,
    marlin_post_init,
    exllama_post_init,
    exllamav2_post_init,
)
from awq.utils.module import (
    get_named_linears,
    set_op_by_name,
    exclude_layers_to_not_quantize,
)
from transformers import (
    AutoConfig,
    PreTrainedModel,
    PretrainedConfig,
    AutoProcessor,
    CLIPImageProcessor,
    PreTrainedTokenizer,
)
from accelerate.big_modeling import (
    init_empty_weights,
    load_checkpoint_and_dispatch,
)
from awq.models._config import AwqConfig
from awq.modules.act import ScaledActivation
from awq.quantize.quantizer import AwqQuantizer

# Since we support different `AutoModelForxxx` from transformers
# we need to define a custom mapping dict as below:
TRANSFORMERS_AUTO_MAPPING_DICT = {
    "mpt": "AutoModelForCausalLM",
    "llama": "AutoModelForCausalLM",
    "opt": "AutoModelForCausalLM",
    "RefinedWeb": "AutoModelForCausalLM",
    "RefinedWebModel": "AutoModelForCausalLM",
    "falcon": "AutoModelForCausalLM",
    "bloom": "AutoModelForCausalLM",
    "gptj": "AutoModelForCausalLM",
    "gpt_bigcode": "AutoModelForCausalLM",
    "mistral": "AutoModelForCausalLM",
    "mixtral": "AutoModelForCausalLM",
    "gpt_neox": "AutoModelForCausalLM",
    "aquila": "AutoModelForCausalLM",
    "Yi": "AutoModelForCausalLM",
    "qwen": "AutoModelForCausalLM",
    "baichuan": "AutoModelForCausalLM",
    "llava": "AutoModelForVision2Seq",
    "qwen2": "AutoModelForCausalLM",
    "gemma": "AutoModelForCausalLM",
}


class BaseAWQForCausalLM(nn.Module):
    def __init__(
        self,
        model: Annotated[PreTrainedModel, Doc("The pretrained or quantized model.")],
        model_type: Annotated[str, Doc("The model type, found in config.json.")],
        is_quantized: Annotated[
            bool, Doc("Indicates if the current model is quantized.")
        ],
        config: Annotated[PretrainedConfig, Doc("The config of the model.")],
        quant_config: Annotated[
            AwqConfig, Doc("The quantization config of the model.")
        ],
        processor: Annotated[
            AutoProcessor, Doc("An optional processor, e.g. for vision models.")
        ],
    ):
        """The base model for all AutoAWQ models."""
        super().__init__()
        self.model: PreTrainedModel = model
        self.model_type: str = model_type
        self.is_quantized: bool = is_quantized
        self.search_result = None
        self.config: PretrainedConfig = config
        self.quant_config: AwqConfig = quant_config
        self.processor: CLIPImageProcessor = processor
    def to(self, device: Annotated[str, Doc("The device to move your model to.")]):
        """A utility function for moving the model to a device."""
        return self.model.to(device)
    def forward(self, *args, **kwargs):
        """A forward function that mimics the torch forward."""
        return self.model(*args, **kwargs)
    def generate(self, *args, **kwargs):
        """A generate function that mimics the HF generate function."""
        with torch.inference_mode():
            return self.model.generate(*args, **kwargs)

    @torch.no_grad()
    def quantize(
        self,
        tokenizer: Annotated[
            PreTrainedTokenizer, Doc("The tokenizer to use for quantization.")
        ] = None,
        quant_config: Annotated[
            Dict, Doc("The quantization config you want to use.")
        ] = {},
        calib_data: Annotated[
            Union[str, List[str]],
            Doc(
                "The calibration dataset. Either a string pointing to Huggingface or a list of preloaded examples."
            ),
        ] = "pileval",
        split: Annotated[str, Doc("The split of calib_data.")] = "train",
        text_column: Annotated[str, Doc("The text column of calib_data.")] = "text",
        duo_scaling: Annotated[
            bool, Doc("Whether to scale using both w/x or just x.")
        ] = True,
        export_compatible: Annotated[
            bool,
            Doc(
                "This argument avoids real quantization by only applying the scales without quantizing down to FP16."
            ),
        ] = False,
        apply_clip: Annotated[
            bool,
            Doc(
                "Whether to apply clipping to the model during quantization. Some models may perform better with this set to False."
            ),
        ] = True,
    ):
        """
        The main quantization function that you can use to quantize your model.

        Example:

        ```python
        from awq import AutoAWQForCausalLM
        from transformers import AutoTokenizer

        model_path = "..."
        model = AutoAWQForCausalLM.from_pretrained(model_path)
        tokenizer = AutoTokenizer.from_pretrained(model_path)

        quant_config = { "zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM" }
        model.quantize(tokenizer, quant_config)
        ```
        """
        self.quant_config: AwqConfig = AwqConfig.from_dict(quant_config)
        if hasattr(self, "modules_to_not_convert"):
            self.quant_config.modules_to_not_convert = self.modules_to_not_convert

        self.quantizer = AwqQuantizer(
            self,
            self.model,
            tokenizer,
            self.quant_config.w_bit,
            self.quant_config.q_group_size,
            self.quant_config.zero_point,
            self.quant_config.version,
            calib_data,
            split,
            text_column,
            duo_scaling,
            modules_to_not_convert=self.quant_config.modules_to_not_convert,
            export_compatible=export_compatible,
            apply_clip=apply_clip,
        )
        self.quantizer.quantize()
        self.is_quantized = True
    @torch.no_grad()
    def pack(self):
        """
        A utility function for the following scenario. Note that save_quantized will
        overwrite existing weights if you use the same quant_path.
        Example:

        ```python
        model.quantize(
            tokenizer,
            quant_config=quant_config,
            export_compatible=True
        )
        model.save_quantized(...)  # produces GGUF/other compat weights
        model.pack()  # makes the model CUDA compat
        model.save_quantized(...)  # produces CUDA compat weights
        ```
        """
        self.quantizer.pack()
    @staticmethod
    def fuse_layers(model):
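        # No-op in the base class; model subclasses that support fusing override this.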
        pass
    def save_quantized(
        self,
        save_dir: Annotated[str, Doc("The directory to save your model to.")],
        safetensors: Annotated[
            bool, Doc("Whether to save the model as safetensors or torch files.")
        ] = True,
        shard_size: Annotated[
            str, Doc("The shard size for sharding large models into multiple chunks.")
        ] = "5GB",
    ):
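        """Save the quantized weights, config files, and quantization config to `save_dir`.

        Example (a minimal sketch; the save path is a placeholder):

        ```python
        model.save_quantized("<save dir>", safetensors=True, shard_size="5GB")
        ```
        """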
        save_dir = save_dir[:-1] if save_dir[-1] == "/" else save_dir
        # Save model
        class EmptyModule(nn.Module):
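            """A parameter-less stand-in whose empty state dict lets save_pretrained write config files without weights."""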
            def __init__(self):
                super(EmptyModule, self).__init__()

            def forward(self, x):
                return x
        # Save model and config files with empty state dict
        self.model.config.quantization_config = self.quant_config.to_transformers_dict()
        self.model.generation_config.do_sample = True
        self.model.save_pretrained(save_dir, state_dict=EmptyModule().state_dict())
        # Vision transformers have a processor
        if self.processor is not None:
            self.processor.save_pretrained(save_dir)

        # Remove empty state dict
        default_paths = [
            f"{save_dir}/model.safetensors",
            f"{save_dir}/pytorch_model.bin",
        ]
        for path in default_paths:
            if os.path.exists(path):
                os.remove(path)
        # model_name has no extension, add it when saving state_dict
        model_name = "model.safetensors" if safetensors else "pytorch_model.bin"
        # shard checkpoint into chunks (5GB by default)
        shards, index = shard_checkpoint(
            self.model.state_dict(), max_shard_size=shard_size, weights_name=model_name
        )
        for shard_file, shard in shards.items():
            if safetensors:
                # safetensors requires contiguous tensors that own their memory, so clone each one
                shard = {k: v.clone().contiguous() for k, v in shard.items()}
                save_file(
                    shard, os.path.join(save_dir, shard_file), metadata={"format": "pt"}
                )
            else:
                torch.save(shard, os.path.join(save_dir, shard_file))
        # save shard index
        if index is not None:
            with open(f"{save_dir}/{model_name}.index.json", "w+") as file:
                file.write(json.dumps(index, indent=4))

    @classmethod
    def from_pretrained(
        self,
        model_path: Annotated[str, Doc("A Huggingface path or local path to a model.")],
        model_type: Annotated[str, Doc("The model type, loaded from config.json.")],
        torch_dtype: Annotated[
            torch.dtype,
            Doc(
                "The dtype to load the model as. May not work with other values than float16."
            ),
        ] = torch.float16,
        trust_remote_code: Annotated[
            bool,
            Doc(
                "Useful for Huggingface repositories that have not been integrated into transformers yet."
            ),
        ] = True,
        safetensors: Annotated[
            bool, Doc("Whether to download/load safetensors instead of torch weights.")
        ] = True,
        device_map: Annotated[
            Union[str, Dict],
            Doc(
                "A device map that will be passed onto the model loading method from transformers."
            ),
        ] = None,
        download_kwargs: Annotated[
            Dict,
            Doc("Keyword arguments passed on to snapshot_download when fetching the model."),
        ] = None,
        **model_init_kwargs: Annotated[
            Dict,
            Doc(
                "Additional kwargs that are passed to the model during initialization."
            ),
        ],
    ):
        """A method for initialization of pretrained models, usually in FP16."""
        # Get weights path and quant config
        model_weights_path, config, quant_config = self._load_config(
            self, model_path, "", safetensors,
            trust_remote_code=trust_remote_code,
            download_kwargs=download_kwargs
        )
        target_cls_name = TRANSFORMERS_AUTO_MAPPING_DICT[config.model_type]
        target_cls = getattr(transformers, target_cls_name)

        processor = None
        if target_cls_name == "AutoModelForVision2Seq":
            processor = AutoProcessor.from_pretrained(model_weights_path)
            processor: CLIPImageProcessor = processor.image_processor

        # Not quantized yet, so load with the plain transformers auto class
        model = target_cls.from_pretrained(
            model_weights_path,
            trust_remote_code=trust_remote_code,
            torch_dtype=torch_dtype,
            use_safetensors=safetensors,
            device_map=device_map,
            **model_init_kwargs,
        )

        model.eval()

        return self(
            model,
            model_type,
            is_quantized=False,
            config=config,
            quant_config=quant_config,
            processor=processor,
        )

    @classmethod
    def from_quantized(
        self,
        model_path: Annotated[str, Doc("A Huggingface path or local path to a model.")],
        model_type: Annotated[str, Doc("The model type, loaded from config.json.")],
        model_filename: Annotated[
            str, Doc("Load a specific model's filename by specifying this argument.")
        ] = "",
        max_seq_len: Annotated[
            int,
            Doc(
                "The maximum sequence cached sequence length of the model. Larger values may increase loading time and memory usage."
            ),
        ] = None,
        torch_dtype: Annotated[
            torch.dtype,
            Doc(
                "The dtype to load the model as. May not work with other values than float16."
            ),
        ] = torch.float16,
        trust_remote_code: Annotated[
            bool,
            Doc(
                "Useful for Huggingface repositories that have not been integrated into transformers yet."
            ),
        ] = True,
        safetensors: Annotated[
            bool, Doc("Whether to download/load safetensors instead of torch weights.")
        ] = True,
        fuse_layers: Annotated[
            bool,
            Doc(
                "Whether to use fused/optimized combination of layers for increased speed."
            ),
        ] = True,
        use_exllama: Annotated[
            bool, Doc("Whether to map the weights to ExLlamaV1 kernels.")
        ] = False,
        use_exllama_v2: Annotated[
            bool, Doc("Whether to map the weights to ExLlamaV2 kernels.")
        ] = False,
        device_map: Annotated[
            Union[str, Dict],
            Doc(
                "A device map that will be passed onto the model loading method from transformers."
            ),
        ] = "balanced",
        offload_folder: Annotated[
            str,
            Doc("The folder ot offload the model to."),
        ] = None,
        download_kwargs: Annotated[
            Dict,
            Doc("Keyword arguments passed on to snapshot_download when fetching the model."),
        ] = None,
        **config_kwargs: Annotated[
            Dict,
            Doc(
                "Additional kwargs that are passed to the config during initialization."
            ),
        ],
    ):
        """A method for initialization of a quantized model, usually in INT4."""
        # [STEP 1-2] Load weights path and configs
        model_weights_path, config, quant_config = self._load_config(
            self,
            model_path,
            model_filename,
            safetensors,
            trust_remote_code,
            max_seq_len=max_seq_len,
            download_kwargs=download_kwargs,
            **config_kwargs,
        )

        target_cls_name = TRANSFORMERS_AUTO_MAPPING_DICT[config.model_type]
        target_cls = getattr(transformers, target_cls_name)
        # [STEP 3] Load model
        with init_empty_weights():
            model = target_cls.from_config(
                config=config,
                torch_dtype=torch_dtype,
                trust_remote_code=trust_remote_code,
            )

        # Prepare WQLinear layers, replace nn.Linear
        self._load_quantized_modules(
            self,
            model,
            quant_config,
            quant_config.version,
            use_exllama=use_exllama,
            use_exllama_v2=use_exllama_v2,
        )

        model.tie_weights()

        # loads the weights into modules and distributes
        # across available devices automatically
        load_checkpoint_and_dispatch(
            model,
            checkpoint=model_weights_path,
            device_map=device_map,
            no_split_module_classes=[self.layer_type],
s4rduk4r's avatar
s4rduk4r committed
451
            offload_folder=offload_folder,
            dtype=torch_dtype,
        )
        # Optionally fuse known modules for faster inference
        if fuse_layers:
            self.fuse_layers(model)
        if quant_config.version == "marlin":
            model = marlin_post_init(model)

        elif use_exllama:
            # creates q4 handle
            model = exllama_post_init(model)
        elif use_exllama_v2:
            # creates q4 handle and allocates scratch spaces wrt max_input_len and max_batch_size
            model = exllamav2_post_init(
                model,
                max_input_len=max_seq_len or 2048,
                max_batch_size=int(os.getenv("AWQ_BATCH_SIZE", 1)),
            )

        return self(
            model,
            model_type,
            is_quantized=True,
            config=config,
            quant_config=quant_config,
            processor=None,
        )
    def _load_config(
        self,
        model_path,
        model_filename,
        safetensors=True,
        trust_remote_code=True,
        max_seq_len=4096,
        download_kwargs=None,
        **config_kwargs,
    ):
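        """Resolve the model weights path (downloading from the Hub if needed) and load the model and quantization configs."""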
        # [STEP 1] Download model if path is not a directory
        if not os.path.isdir(model_path):
            ignore_patterns = ["*msgpack*", "*h5*", "optimizer.pt"]
            if safetensors:
                ignore_patterns.extend(["*.pt*", "*.bin*", "consolidated*"])
            else:
                ignore_patterns.append("*.safetensors*")

            if download_kwargs is None:
                download_kwargs = {}

            if "ignore_patterns" in download_kwargs:
                download_kwargs_ignore_patterns = download_kwargs.pop("ignore_patterns")

                if isinstance(download_kwargs_ignore_patterns, str):
                    ignore_patterns.append(download_kwargs_ignore_patterns)
                elif isinstance(download_kwargs_ignore_patterns, list):
                    ignore_patterns.extend(download_kwargs_ignore_patterns)

            model_path = snapshot_download(
                model_path, ignore_patterns=ignore_patterns, **download_kwargs
            )

        if model_filename != "":
            model_weights_path = model_path + f"/{model_filename}"
        else:
            model_weights_path = model_path
        # [STEP 2] Load config and set sequence length
        # TODO: Create BaseAWQConfig class
        quant_config = AwqConfig.from_pretrained(model_path)
        # Load model config and set max generation length
        if max_seq_len is None and hasattr(self, "max_seq_len_key"):
            config = AutoConfig.from_pretrained(
                model_path, trust_remote_code=trust_remote_code, **config_kwargs
            )
            config.max_seq_len = getattr(config, self.max_seq_len_key, 2048)
            # Also set max_seq_len on text_config so generation works for multi-modal models
            if hasattr(config, "text_config"):
                config.text_config.max_seq_len = getattr(
                    config, self.max_seq_len_key, 2048
                )
        else:
            max_seq_len = 2048 if max_seq_len is None else max_seq_len
            config = AutoConfig.from_pretrained(
                model_path, trust_remote_code=trust_remote_code, **config_kwargs
            )
            config.max_seq_len = max_seq_len
        return model_weights_path, config, quant_config
    def _load_quantized_modules(
        self, model, quant_config, version, use_exllama, use_exllama_v2
    ):
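        """Swap each eligible nn.Linear in every block for the WQLinear variant selected by `version` and the exllama flags."""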
        # Real quantization of weights
        assert not (
            version == "gemv" and (use_exllama or use_exllama_v2)
        ), "Exllama kernels only support GEMM version."

        # Get blocks of model
        layers = self.get_model_layers(model)
        for i in tqdm(range(len(layers)), desc="Replacing layers..."):
            layer = layers[i]

            # Get every linear layer in a block
            named_linears = get_named_linears(layer)
            # Filter out the linear layers listed in modules_to_not_convert
            named_linears = exclude_layers_to_not_quantize(
                named_linears, quant_config.modules_to_not_convert
            )
            # Replace activation functions
            self._scale_activations(self, layer)
            # Replace nn.Linear with WQLinear
            for name, module in named_linears.items():
                if version == "marlin":
                    q_linear_module = WQLinear_Marlin
                elif use_exllama:
                    q_linear_module = WQLinear_Exllama
                elif use_exllama_v2:
                    q_linear_module = WQLinear_ExllamaV2
                elif version == "gemm":
                    q_linear_module = WQLinear_GEMM
                elif version == "gemv":
                    q_linear_module = WQLinear_GEMV
                elif version == "gemv_fast":
                    q_linear_module = WQLinear_GEMVFast
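                # The final True flag requests init-only construction (empty quantized
                # buffers); real weights are loaded by load_checkpoint_and_dispatch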
                q_linear = q_linear_module.from_linear(
                    module, quant_config.w_bit, quant_config.q_group_size, True
                )
                q_linear.to(next(layer.parameters()).device)
                set_op_by_name(layer, name, q_linear)
            torch.cuda.empty_cache()
            gc.collect()
    @staticmethod
    def _scale_activations(self, layer):
        scale_dict = self.get_act_for_scaling(layer)
        if scale_dict["is_scalable"]:
            if not isinstance(scale_dict["scale_layer"], ScaledActivation):
                param = next(layer.parameters())
                # get activation scale
                scale_like = torch.ones(
                    scale_dict["scale_shape"], dtype=param.dtype, device=param.device
                )
                # scale activation
                scaled_act = ScaledActivation(scale_dict["scale_layer"], scale_like)
                set_op_by_name(layer, scale_dict["scale_name"], scaled_act)