Unverified commit 48eae6f4 authored by Sayak Paul, committed by GitHub

[Quantizers] add `is_compileable` property to quantizers. (#11736)

add is_compileable property to quantizers.
parent 66394bf6
@@ -227,3 +227,8 @@ class DiffusersQuantizer(ABC):
     @property
     @abstractmethod
     def is_trainable(self): ...
+
+    @property
+    def is_compileable(self) -> bool:
+        """Flag indicating whether the quantized model can be compiled"""
+        return False
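For orientation, here is a minimal sketch (not the actual diffusers code) of how a backend-specific subclass opts in by overriding this default, and how calling code might gate `torch.compile` on the flag. `MyBackendQuantizer` and `maybe_compile` are hypothetical names used only for illustration; the stubbed abstract methods stand in for a real backend's logic.

    import torch

    from diffusers.quantizers.base import DiffusersQuantizer


    class MyBackendQuantizer(DiffusersQuantizer):
        # Hypothetical backend that declares compile support by overriding the
        # new base-class default of False.
        @property
        def is_compileable(self) -> bool:
            return True

        # Minimal stubs for the abstract interface; real quantizers implement these.
        def _process_model_before_weight_loading(self, model, **kwargs):
            return model

        def _process_model_after_weight_loading(self, model, **kwargs):
            return model

        @property
        def is_serializable(self):
            return False

        @property
        def is_trainable(self):
            return False


    def maybe_compile(model, quantizer=None):
        # Hypothetical helper: compile only when the quantization backend declares
        # compile support; unknown backends stay on the eager path.
        if quantizer is None or quantizer.is_compileable:
            return torch.compile(model)
        return model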
@@ -564,6 +564,10 @@ class BnB8BitDiffusersQuantizer(DiffusersQuantizer):
         # Because we're mandating `bitsandbytes` 0.43.3.
         return True
 
+    @property
+    def is_compileable(self) -> bool:
+        return True
+
     def _dequantize(self, model):
         from .utils import dequantize_and_replace
@@ -146,6 +146,10 @@ class GGUFQuantizer(DiffusersQuantizer):
     def is_trainable(self) -> bool:
         return False
 
+    @property
+    def is_compileable(self) -> bool:
+        return True
+
     def _dequantize(self, model):
         is_model_on_cpu = model.device.type == "cpu"
         if is_model_on_cpu:
@@ -175,3 +175,7 @@ class QuantoQuantizer(DiffusersQuantizer):
     @property
     def is_serializable(self):
         return True
+
+    @property
+    def is_compileable(self) -> bool:
+        return True
@@ -335,3 +335,7 @@ class TorchAoHfQuantizer(DiffusersQuantizer):
     @property
     def is_trainable(self):
         return self.quantization_config.quant_type.startswith("int8")
+
+    @property
+    def is_compileable(self) -> bool:
+        return True
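As a usage sketch under assumptions: the checkpoint below is illustrative, and reading the quantizer back off the loaded model via an `hf_quantizer` attribute is an assumption about where diffusers attaches it, not something this diff establishes.

    import torch

    from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

    # Illustrative checkpoint: load an 8-bit bitsandbytes-quantized transformer.
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        subfolder="transformer",
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        torch_dtype=torch.bfloat16,
    )

    # Assumed attribute: the quantizer instance attached to the loaded model.
    quantizer = getattr(transformer, "hf_quantizer", None)
    if quantizer is not None and quantizer.is_compileable:
        # BnB8BitDiffusersQuantizer now reports True, so compilation is attempted.
        transformer = torch.compile(transformer)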