Unverified commit 90ee9cea authored by Arthur, committed by GitHub

Revert "add exllamav2 arg" (#27102)

Revert "add exllamav2 arg (#26437)"

This reverts commit 8214d6e7.
parent aa4198a2
@@ -128,22 +128,12 @@ For 4-bit model, you can use the exllama kernels in order to a faster inference
 ```py
 import torch
-gptq_config = GPTQConfig(bits=4)
-model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config)
-```
-With the release of the exllamav2 kernels, you can get faster inference speed compared to the exllama kernels. You just need to
-pass `use_exllama_v2=True` in [`GPTQConfig`] and disable exllama kernels:
-```py
-import torch
-gptq_config = GPTQConfig(bits=4, use_exllama_v2=True)
+gptq_config = GPTQConfig(bits=4, disable_exllama=False)
 model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config)
 ```
 Note that only 4-bit models are supported for now. Furthermore, it is recommended to deactivate the exllama kernels if you are finetuning a quantized model with peft.
-You can find the benchmark of these kernels [here](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)
 #### Fine-tune a quantized model
 With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been quantized with GPTQ.
...
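For readers landing on this commit from the docs, here is a minimal sketch of the post-revert usage the restored page describes: the exllama kernels are toggled via `disable_exllama` (there is no `use_exllama_v2` anymore), and they should stay deactivated when fine-tuning with PEFT. The model id is the docs' own placeholder, and the LoRA hyperparameters are illustrative assumptions, not part of this diff.

```py
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, GPTQConfig

model_id = "{your_username}/opt-125m-gptq"  # placeholder from the docs above

# Inference: enable the exllama kernels (4-bit models only).
inference_config = GPTQConfig(bits=4, disable_exllama=False)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=inference_config
)

# Fine-tuning with PEFT: keep the exllama kernels deactivated, as recommended above.
training_config = GPTQConfig(bits=4, disable_exllama=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=training_config
)
lora_config = LoraConfig(  # illustrative LoRA settings, not from this commit
    r=8, lora_alpha=32, lora_dropout=0.05, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM"
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```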
@@ -2759,7 +2759,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
             logger.warning(
                 "You passed `quantization_config` to `from_pretrained` but the model you're loading already has a "
                 "`quantization_config` attribute and has already quantized weights. However, loading attributes"
-                " (e.g. disable_exllama, use_cuda_fp16, max_input_length, use_exllama_v2) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
+                " (e.g. disable_exllama, use_cuda_fp16, max_input_length) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
             )
         if (
             quantization_method_from_args == QuantizationMethod.GPTQ
...
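The warning above applies in exactly one scenario: the checkpoint already embeds a GPTQ `quantization_config`, and the caller passes another one to `from_pretrained`. A hedged illustration, assuming the already-quantized test checkpoint referenced later in this diff; only the listed loading attributes from the passed config take effect, the rest come from the saved config.

```py
from transformers import AutoModelForCausalLM, GPTQConfig

# The checkpoint below already carries a GPTQ quantization_config, so this call
# triggers the warning shown above: only disable_exllama, use_cuda_fp16 and
# max_input_length from `override` are applied; bits, group_size, etc. are read
# from the saved config.
override = GPTQConfig(bits=4, disable_exllama=False, max_input_length=4096)
model = AutoModelForCausalLM.from_pretrained(
    "hf-internal-testing/Llama-2-7B-GPTQ",  # already-quantized model used in the tests in this diff
    revision="gptq-4bit-128g-actorder_True",
    device_map="auto",
    quantization_config=override,
)
```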
@@ -349,8 +349,6 @@ class GPTQConfig(QuantizationConfigMixin):
         max_input_length (`int`, *optional*):
             The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input
             length. It is specific to the exllama backend with act-order.
-        use_exllama_v2 (`bool`, *optional*, defaults to `False`):
-            Whether to use exllamav2 backend. Only works with `bits` = 4.
     """
     def __init__(
@@ -371,7 +369,6 @@ class GPTQConfig(QuantizationConfigMixin):
         pad_token_id: Optional[int] = None,
         disable_exllama: bool = False,
         max_input_length: Optional[int] = None,
-        use_exllama_v2: bool = False,
         **kwargs,
     ):
         self.quant_method = QuantizationMethod.GPTQ
@@ -391,14 +388,11 @@ class GPTQConfig(QuantizationConfigMixin):
         self.pad_token_id = pad_token_id
         self.disable_exllama = disable_exllama
         self.max_input_length = max_input_length
-        self.use_exllama_v2 = use_exllama_v2
-        # needed for compatibility with optimum gptq config
-        self.disable_exllamav2 = not use_exllama_v2
         self.post_init()

     def get_loading_attributes(self):
         attibutes_dict = copy.deepcopy(self.__dict__)
-        loading_attibutes = ["disable_exllama", "use_exllama_v2", "use_cuda_fp16", "max_input_length"]
+        loading_attibutes = ["disable_exllama", "use_cuda_fp16", "max_input_length"]
         loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes}
         return loading_attibutes_dict
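For concreteness, a small usage sketch of `get_loading_attributes` as it stands after this revert. The dict ordering and the `use_cuda_fp16` default (`False`) are assumptions about the surrounding config class, not shown in this hunk.

```py
from transformers import GPTQConfig

config = GPTQConfig(bits=4, disable_exllama=False, max_input_length=4028)
# Only the three loading-time keys survive the filter; `use_exllama_v2` is gone.
print(config.get_loading_attributes())
# e.g. {'use_cuda_fp16': False, 'disable_exllama': False, 'max_input_length': 4028}
```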
@@ -424,19 +418,3 @@ class GPTQConfig(QuantizationConfigMixin):
                     f"""dataset needs to be either a list of string or a value in
                     ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}"""
                 )
-        if self.bits == 4:
-            if self.use_exllama_v2:
-                optimum_version = version.parse(importlib.metadata.version("optimum"))
-                autogptq_version = version.parse(importlib.metadata.version("auto_gptq"))
-                if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"):
-                    raise ValueError(
-                        f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . Make sure to have that version installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}"
-                    )
-                self.disable_exllama = True
-                logger.warning("You have activated exllamav2 kernels. Exllama kernels will be disabled.")
-            if not self.disable_exllama:
-                logger.warning(
-                    """You have activated exllama backend. Note that you can get better inference
-                    speed using exllamav2 kernel by setting `use_exllama_v2=True`.`disable_exllama` will be deprecated
-                    in future version."""
-                )
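The removed block above gated the exllamav2 path behind minimum versions of optimum and auto-gptq. For reference, a standalone sketch of that version-check pattern, with package names and thresholds copied from the removed lines; it is not part of the library after this revert.

```py
import importlib.metadata

from packaging import version


def check_exllamav2_requirements():
    """Sketch of the version gate the reverted code performed before enabling exllamav2."""
    optimum_version = version.parse(importlib.metadata.version("optimum"))
    autogptq_version = version.parse(importlib.metadata.version("auto_gptq"))
    if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"):
        raise ValueError(
            f"You need optimum > 1.13.2 and auto-gptq > 0.4.2, "
            f"detected optimum {optimum_version} and auto-gptq {autogptq_version}"
        )
```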
@@ -178,7 +178,6 @@ class GPTQTest(unittest.TestCase):
                 group_size=self.group_size,
                 bits=self.bits,
                 disable_exllama=self.disable_exllama,
-                disable_exllamav2=True,
             )
         self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear)
@@ -282,7 +281,8 @@ class GPTQTestActOrderExllama(unittest.TestCase):
         """
         Setup quantized model
         """
-        cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028)
+        cls.quantization_config = GPTQConfig(bits=4, disable_exllama=False, max_input_length=4028)
         cls.quantized_model = AutoModelForCausalLM.from_pretrained(
             cls.model_name,
             revision=cls.revision,
@@ -334,62 +334,6 @@ class GPTQTestActOrderExllama(unittest.TestCase):
         self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)

-@slow
-@require_optimum
-@require_auto_gptq
-@require_torch_gpu
-@require_accelerate
-class GPTQTestExllamaV2(unittest.TestCase):
-    """
-    Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order).
-    More information on those arguments here:
-    https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig
-    """
-
-    EXPECTED_OUTPUTS = set()
-    EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year")
-    model_name = "hf-internal-testing/Llama-2-7B-GPTQ"
-    revision = "gptq-4bit-128g-actorder_True"
-    input_text = "Hello my name is"
-
-    @classmethod
-    def setUpClass(cls):
-        """
-        Setup quantized model
-        """
-        cls.quantization_config = GPTQConfig(bits=4, use_exllama_v2=True)
-        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
-            cls.model_name,
-            revision=cls.revision,
-            torch_dtype=torch.float16,
-            device_map={"": 0},
-            quantization_config=cls.quantization_config,
-        )
-        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True)
-
-    def check_inference_correctness(self, model):
-        """
-        Test the generation quality of the quantized model and see that we are matching the expected output.
-        Given that we are operating on small numbers + the testing model is relatively small, we might not get
-        the same output across GPUs. So we'll generate few tokens (5-10) and check their output.
-        """
-        # Check that inference pass works on the model
-        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
-        # Check the exactness of the results
-        output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
-        # Get the generation
-        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
-
-    def test_generate_quality(self):
-        """
-        Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens
-        """
-        self.check_inference_correctness(self.quantized_model)
-
 # fail when run all together
 @pytest.mark.skip
 @require_accelerate
...
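The removed `GPTQTestExllamaV2` class follows the same generate-and-compare pattern as the exllama test that remains. Below is a condensed, standalone sketch of that pattern using the post-revert config; the model, revision and expected string are taken from the removed test, and, as its own docstring warns, the exact output can vary across kernels and GPUs.

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

EXPECTED_OUTPUTS = {"Hello my name is Katie and I am a 20 year"}  # from the removed test
model_name = "hf-internal-testing/Llama-2-7B-GPTQ"
revision = "gptq-4bit-128g-actorder_True"

# Post-revert way to request the exllama kernels (use_exllama_v2 no longer exists).
quantization_config = GPTQConfig(bits=4, disable_exllama=False)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    revision=revision,
    torch_dtype=torch.float16,
    device_map={"": 0},
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

# Generate a handful of tokens and check the decoded text against the expected set.
encoded = tokenizer("Hello my name is", return_tensors="pt")
output = model.generate(input_ids=encoded["input_ids"].to(0), max_new_tokens=10)
assert tokenizer.decode(output[0], skip_special_tokens=True) in EXPECTED_OUTPUTS
```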