auto.py 3.53 KB
Newer Older
1
import os
Casper's avatar
Casper committed
2
import logging
3
from transformers import AutoConfig
4
from awq.models import *
5
from awq.models.base import BaseAWQForCausalLM
6
7
8

# Maps a HuggingFace `config.model_type` string to the AWQ wrapper class that
# knows how to quantize/load that architecture. Note the three Falcon aliases:
# older checkpoints report "RefinedWeb"/"RefinedWebModel" as their model_type.
AWQ_CAUSAL_LM_MODEL_MAP = dict(
    mpt=MptAWQForCausalLM,
    llama=LlamaAWQForCausalLM,
    opt=OptAWQForCausalLM,
    RefinedWeb=FalconAWQForCausalLM,
    RefinedWebModel=FalconAWQForCausalLM,
    falcon=FalconAWQForCausalLM,
    bloom=BloomAWQForCausalLM,
    gptj=GPTJAWQForCausalLM,
    gpt_bigcode=GptBigCodeAWQForCausalLM,
    mistral=MistralAWQForCausalLM,
    mixtral=MixtralAWQForCausalLM,
    gpt_neox=GPTNeoXAWQForCausalLM,
    aquila=AquilaAWQForCausalLM,
    Yi=YiAWQForCausalLM,
    qwen=QwenAWQForCausalLM,
    baichuan=BaichuanAWQForCausalLM,
    llava=LlavaAWQForCausalLM,
    qwen2=Qwen2AWQForCausalLM,
    gemma=GemmaAWQForCausalLM,
    starcoder2=Starcoder2AWQForCausalLM,
)

30

31
def check_and_get_model_type(model_dir, trust_remote_code=True, **model_init_kwargs):
    """Resolve and validate the ``model_type`` of a checkpoint.

    Loads the HuggingFace config from ``model_dir`` and returns its
    ``model_type`` string, which is used as the lookup key into
    ``AWQ_CAUSAL_LM_MODEL_MAP``.

    Args:
        model_dir: Local path or hub id passed to ``AutoConfig.from_pretrained``.
        trust_remote_code: Forwarded to ``AutoConfig.from_pretrained``.
        **model_init_kwargs: Extra keyword arguments forwarded to
            ``AutoConfig.from_pretrained``.

    Returns:
        The config's ``model_type`` string.

    Raises:
        TypeError: If the architecture has no AWQ wrapper registered in
            ``AWQ_CAUSAL_LM_MODEL_MAP``.
    """
    config = AutoConfig.from_pretrained(
        model_dir, trust_remote_code=trust_remote_code, **model_init_kwargs
    )
    # Membership test directly on the dict — equivalent to `.keys()` but idiomatic.
    if config.model_type not in AWQ_CAUSAL_LM_MODEL_MAP:
        raise TypeError(f"{config.model_type} isn't supported yet.")
    return config.model_type

40

41
42
class AutoAWQForCausalLM:
    """Factory that dispatches to the architecture-specific AWQ model class.

    This class is never instantiated directly. Use the classmethod
    constructors :meth:`from_pretrained` (unquantized weights, prior to
    quantization) or :meth:`from_quantized` (already-quantized weights),
    which look up the concrete wrapper in ``AWQ_CAUSAL_LM_MODEL_MAP`` by the
    checkpoint's ``model_type`` and delegate to it.
    """

    def __init__(self):
        # Direct instantiation is a usage error: all construction goes
        # through the classmethod factories below.
        raise EnvironmentError(
            "You must instantiate AutoAWQForCausalLM with\n"
            "AutoAWQForCausalLM.from_quantized or AutoAWQForCausalLM.from_pretrained"
        )

    @classmethod
    def from_pretrained(
        cls,  # fixed: classmethod's first parameter is conventionally `cls`, not `self`
        model_path,
        trust_remote_code=True,
        safetensors=True,
        device_map=None,
        download_kwargs=None,
        **model_init_kwargs,
    ) -> "BaseAWQForCausalLM":
        """Load an unquantized model for subsequent AWQ quantization.

        Args:
            model_path: Local path or hub id of the checkpoint.
            trust_remote_code: Allow custom modeling code from the hub.
            safetensors: Prefer safetensors weight files.
            device_map: Optional accelerate-style device placement.
            download_kwargs: Optional kwargs for the weight download.
            **model_init_kwargs: Forwarded to the concrete model's loader.

        Returns:
            The architecture-specific ``BaseAWQForCausalLM`` subclass instance.
        """
        model_type = check_and_get_model_type(
            model_path, trust_remote_code, **model_init_kwargs
        )

        return AWQ_CAUSAL_LM_MODEL_MAP[model_type].from_pretrained(
            model_path,
            model_type,
            trust_remote_code=trust_remote_code,
            safetensors=safetensors,
            device_map=device_map,
            download_kwargs=download_kwargs,
            **model_init_kwargs,
        )

    @classmethod
    def from_quantized(
        cls,  # fixed: classmethod's first parameter is conventionally `cls`, not `self`
        quant_path,
        quant_filename="",
        max_seq_len=2048,
        trust_remote_code=True,
        fuse_layers=True,
        use_exllama=False,
        use_exllama_v2=False,
        batch_size=1,
        safetensors=True,
        device_map="balanced",
        offload_folder=None,
        download_kwargs=None,
        **config_kwargs,
    ) -> "BaseAWQForCausalLM":
        """Load an already AWQ-quantized model for inference.

        Args:
            quant_path: Local path or hub id of the quantized checkpoint.
            quant_filename: Optional explicit weight filename.
            max_seq_len: Maximum sequence length for the fused/cached modules.
            trust_remote_code: Allow custom modeling code from the hub.
            fuse_layers: Fuse layers for faster inference where supported.
            use_exllama: Use the ExLlama kernels.
            use_exllama_v2: Use the ExLlamaV2 kernels.
            batch_size: Exported via the AWQ_BATCH_SIZE environment variable.
            safetensors: Prefer safetensors weight files.
            device_map: Accelerate-style device placement strategy.
            offload_folder: Directory for CPU/disk offloaded weights.
            download_kwargs: Optional kwargs for the weight download.
            **config_kwargs: Forwarded to the concrete model's loader.

        Returns:
            The architecture-specific ``BaseAWQForCausalLM`` subclass instance.
        """
        # Kernels read the batch size from the environment at load time.
        os.environ["AWQ_BATCH_SIZE"] = str(batch_size)
        model_type = check_and_get_model_type(quant_path, trust_remote_code)

        # Backward compatibility: honor the deprecated `max_new_tokens`
        # alias by mapping it onto `max_seq_len` (it is still forwarded
        # via **config_kwargs, matching the original behavior).
        if config_kwargs.get("max_new_tokens") is not None:
            max_seq_len = config_kwargs["max_new_tokens"]
            logging.warning(
                "max_new_tokens argument is deprecated... gracefully "
                "setting max_seq_len=max_new_tokens."
            )

        return AWQ_CAUSAL_LM_MODEL_MAP[model_type].from_quantized(
            quant_path,
            model_type,
            quant_filename,
            max_seq_len,
            trust_remote_code=trust_remote_code,
            fuse_layers=fuse_layers,
            use_exllama=use_exllama,
            use_exllama_v2=use_exllama_v2,
            safetensors=safetensors,
            device_map=device_map,
            offload_folder=offload_folder,
            download_kwargs=download_kwargs,
            **config_kwargs,
        )