"example/example-smart-ptr.py" did not exist on "a576e6a8ca5f22f9f9d5f149929637c3337ad086"
quantization.py 9.85 KB
Newer Older
chenych's avatar
chenych committed
1
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by HuggingFace's Transformers and Optimum libraries.
# https://github.com/huggingface/transformers/blob/v4.41.0/src/transformers/utils/quantization_config.py
# https://github.com/huggingface/optimum/blob/v1.20.0/optimum/gptq/data.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
from typing import TYPE_CHECKING, Any

import torch
from datasets import load_dataset
from transformers import BitsAndBytesConfig, EetqConfig, GPTQConfig, HqqConfig
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.modeling_utils import is_fsdp_enabled

from ...extras import logging
from ...extras.constants import FILEEXT2TYPE, QuantizationMethod
from ...extras.misc import check_version, get_current_device


if TYPE_CHECKING:
    from transformers import PretrainedConfig, PreTrainedTokenizer

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def _get_quantization_dataset(tokenizer: "PreTrainedTokenizer", model_args: "ModelArguments") -> list[dict[str, Any]]:
    r"""Prepare the tokenized dataset to perform AutoGPTQ. Do not use tensor output for JSON serialization."""
    if os.path.isfile(model_args.export_quantization_dataset):
        data_path = FILEEXT2TYPE.get(model_args.export_quantization_dataset.split(".")[-1], None)
        data_files = model_args.export_quantization_dataset
    else:
        data_path = model_args.export_quantization_dataset
        data_files = None

    dataset = load_dataset(
        path=data_path,
        data_files=data_files,
        split="train",
        cache_dir=model_args.cache_dir,
        token=model_args.hf_hub_token,
    )

    samples = []
    maxlen = model_args.export_quantization_maxlen
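    # Rejection-sample calibration windows: keep drawing random examples until one is
    # longer than `maxlen`, then slice a random `maxlen`-token window out of it.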
    for _ in range(model_args.export_quantization_nsamples):
        n_try = 0
        while True:
            if n_try > 100:
                raise ValueError("Cannot find satisfying example, considering decrease `export_quantization_maxlen`.")

            sample_idx = random.randint(0, len(dataset) - 1)
            sample: dict[str, torch.Tensor] = tokenizer(dataset[sample_idx]["text"], return_tensors="pt")
            n_try += 1
            if sample["input_ids"].size(1) > maxlen:
                break  # TODO: fix large maxlen

        word_idx = random.randint(0, sample["input_ids"].size(1) - maxlen - 1)
        input_ids = sample["input_ids"][:, word_idx : word_idx + maxlen]
        attention_mask = sample["attention_mask"][:, word_idx : word_idx + maxlen]
        samples.append({"input_ids": input_ids.tolist(), "attention_mask": attention_mask.tolist()})

    return samples


def configure_quantization(
    config: "PretrainedConfig",
    tokenizer: "PreTrainedTokenizer",
    model_args: "ModelArguments",
    is_trainable: bool,
    init_kwargs: dict[str, Any],
) -> None:
    r"""Priority: PTQ-quantized (train/infer) > AutoGPTQ (export) > On-the-fly quantization (train/infer)."""
    if getattr(config, "quantization_config", None):  # ptq
        if model_args.quantization_bit is not None:
            logger.warning_rank0("`quantization_bit` has no effect on PTQ-quantized models.")

        quantization_config: dict[str, Any] = getattr(config, "quantization_config", None)
        quant_method = quantization_config.get("quant_method", "")

        if quant_method not in (QuantizationMethod.MXFP4, QuantizationMethod.FP8) and (
            is_deepspeed_zero3_enabled() or is_fsdp_enabled()
        ):
            # mxfp4/fp8 checkpoints are dequantized at load time (see below), so they stay compatible
            raise ValueError("DeepSpeed ZeRO-3 or FSDP is incompatible with PTQ-quantized models.")

        if quant_method == QuantizationMethod.MXFP4:
            from transformers import Mxfp4Config

            quant_config = Mxfp4Config(dequantize=True)
            init_kwargs["quantization_config"] = quant_config
            init_kwargs["ignore_mismatched_sizes"] = True

        if quant_method == QuantizationMethod.FP8:
            from transformers import FineGrainedFP8Config

            quant_config = FineGrainedFP8Config(dequantize=True)
            init_kwargs["quantization_config"] = quant_config
            init_kwargs["ignore_mismatched_sizes"] = True

        if quant_method == QuantizationMethod.GPTQ:
            check_version("gptqmodel>=2.0.0", mandatory=True)
            quantization_config.pop("disable_exllama", None)  # remove deprecated args
            quantization_config["use_exllama"] = False  # disable exllama

        if quant_method == QuantizationMethod.AWQ:
            check_version("autoawq", mandatory=True)

        if quant_method == QuantizationMethod.AQLM:
            check_version("aqlm>=1.1.0", mandatory=True)
            quantization_config["bits"] = 2

        quant_bits = quantization_config.get("bits", "?")
        logger.info_rank0(f"Loading {quant_bits}-bit {quant_method.upper()}-quantized model.")

    elif model_args.export_quantization_bit is not None:  # gptqmodel
        if model_args.export_quantization_bit not in [8, 4, 3, 2]:
            raise ValueError("AutoGPTQ only accepts 2/3/4/8-bit quantization.")

        check_version("optimum>=1.24.0", mandatory=True)
        check_version("gptqmodel>=2.0.0", mandatory=True)
        from accelerate.utils import get_max_memory

        if getattr(config, "model_type", None) == "chatglm":
            raise ValueError("ChatGLM model is not supported yet.")

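        # Optimum resolves the transformer blocks to quantize via BLOCK_PATTERNS; prepend the
        # multimodal wrapper path so models such as Gemma3/PaliGemma use their language-model layers.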
        try:
            from optimum.gptq import utils as gq_utils

            if "language_model.model.layers" not in gq_utils.BLOCK_PATTERNS:
                gq_utils.BLOCK_PATTERNS.insert(0, "language_model.model.layers")
        except ImportError:
            pass

        block_name_to_quantize = None
        if getattr(config, "model_type", None) in ["gemma3", "paligemma"]:
            block_name_to_quantize = "language_model.model.layers"

        init_kwargs["quantization_config"] = GPTQConfig(
            bits=model_args.export_quantization_bit,
            tokenizer=tokenizer,
            dataset=_get_quantization_dataset(tokenizer, model_args),
            block_name_to_quantize=block_name_to_quantize,
        )
        init_kwargs["device_map"] = "auto"
        init_kwargs["max_memory"] = get_max_memory()
        model_args.compute_dtype = torch.float16  # force fp16 for gptqmodel
        logger.info_rank0(f"Quantizing model to {model_args.export_quantization_bit} bit with GPTQModel.")

    elif model_args.quantization_bit is not None:  # on-the-fly
        if model_args.quantization_method == QuantizationMethod.BNB:
            if model_args.quantization_bit == 8:
                check_version("bitsandbytes>=0.37.0", mandatory=True)
                init_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)
            elif model_args.quantization_bit == 4:
                check_version("bitsandbytes>=0.39.0", mandatory=True)
                init_kwargs["quantization_config"] = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_compute_dtype=model_args.compute_dtype,
                    bnb_4bit_use_double_quant=model_args.double_quantization,
                    bnb_4bit_quant_type=model_args.quantization_type,
                    bnb_4bit_quant_storage=model_args.compute_dtype,  # crucial for fsdp+qlora
                )
            else:
                raise ValueError("Bitsandbytes only accepts 4-bit or 8-bit quantization.")

            # Do not assign device map if:
            # 1. deepspeed zero3 or fsdp (train)
            # 2. auto quantization device map (inference)
            if is_deepspeed_zero3_enabled() or is_fsdp_enabled() or model_args.quantization_device_map == "auto":
                if model_args.quantization_bit != 4:
                    raise ValueError("Only 4-bit quantized model can use fsdp+qlora or auto device map.")

                check_version("bitsandbytes>=0.43.0", mandatory=True)
            else:
                init_kwargs["device_map"] = {"": get_current_device()}  # change auto device map for inference

            logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with bitsandbytes.")
        elif model_args.quantization_method == QuantizationMethod.HQQ:
            if model_args.quantization_bit not in [8, 6, 5, 4, 3, 2, 1]:
                raise ValueError("HQQ only accepts 1/2/3/4/5/6/8-bit quantization.")

            if is_deepspeed_zero3_enabled() or is_fsdp_enabled():
                raise ValueError("HQQ quantization is incompatible with DeepSpeed ZeRO-3 or FSDP.")

            check_version("hqq", mandatory=True)
            init_kwargs["quantization_config"] = HqqConfig(
                nbits=model_args.quantization_bit, quant_zero=False, quant_scale=False, axis=0
            )  # use ATEN kernel (axis=0) for performance
            logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with HQQ.")
        elif model_args.quantization_method == QuantizationMethod.EETQ:
            if model_args.quantization_bit != 8:
                raise ValueError("EETQ only accepts 8-bit quantization.")

            if is_deepspeed_zero3_enabled() or is_fsdp_enabled():
                raise ValueError("EETQ quantization is incompatible with DeepSpeed ZeRO-3 or FSDP.")

            check_version("eetq", mandatory=True)
            init_kwargs["quantization_config"] = EetqConfig()
            logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with EETQ.")