Commit 48eae6f4 (unverified) in renzhc/diffusers_dcu
Authored Jun 19, 2025 by Sayak Paul; committed by GitHub on Jun 19, 2025
Parent: 66394bf6

[Quantizers] add `is_compileable` property to quantizers. (#11736)

add is_compileable property to quantizers.
Showing 5 changed files with 21 additions and 0 deletions (+21 −0)

src/diffusers/quantizers/base.py                           +5 −0
src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py     +4 −0
src/diffusers/quantizers/gguf/gguf_quantizer.py            +4 −0
src/diffusers/quantizers/quanto/quanto_quantizer.py        +4 −0
src/diffusers/quantizers/torchao/torchao_quantizer.py      +4 −0
src/diffusers/quantizers/base.py

@@ -227,3 +227,8 @@ class DiffusersQuantizer(ABC):
     @property
     @abstractmethod
     def is_trainable(self): ...
+
+    @property
+    def is_compileable(self) -> bool:
+        """Flag indicating whether the quantized model can be compiled"""
+        return False
src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py

@@ -564,6 +564,10 @@ class BnB8BitDiffusersQuantizer(DiffusersQuantizer):
         # Because we're mandating `bitsandbytes` 0.43.3.
         return True
 
+    @property
+    def is_compileable(self) -> bool:
+        return True
+
     def _dequantize(self, model):
         from .utils import dequantize_and_replace
src/diffusers/quantizers/gguf/gguf_quantizer.py

@@ -146,6 +146,10 @@ class GGUFQuantizer(DiffusersQuantizer):
     def is_trainable(self) -> bool:
         return False
 
+    @property
+    def is_compileable(self) -> bool:
+        return True
+
     def _dequantize(self, model):
         is_model_on_cpu = model.device.type == "cpu"
         if is_model_on_cpu:
src/diffusers/quantizers/quanto/quanto_quantizer.py

@@ -175,3 +175,7 @@ class QuantoQuantizer(DiffusersQuantizer):
     @property
     def is_serializable(self):
         return True
+
+    @property
+    def is_compileable(self) -> bool:
+        return True
src/diffusers/quantizers/torchao/torchao_quantizer.py

@@ -335,3 +335,7 @@ class TorchAoHfQuantizer(DiffusersQuantizer):
     @property
     def is_trainable(self):
         return self.quantization_config.quant_type.startswith("int8")
+
+    @property
+    def is_compileable(self) -> bool:
+        return True
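
Usage note (not part of this commit): a minimal sketch of how the new flag might be consulted before compiling a quantized model. The base class defaults `is_compileable` to False, and the bitsandbytes, GGUF, quanto, and torchao quantizers above override it to True. The `hf_quantizer` attribute name and the `maybe_compile` helper are assumptions for illustration only.

import torch

def maybe_compile(model):
    # Hypothetical helper: compile the model only when its attached quantizer
    # (if any) reports that compilation is supported via `is_compileable`.
    quantizer = getattr(model, "hf_quantizer", None)  # attribute name is an assumption
    if quantizer is None or quantizer.is_compileable:
        return torch.compile(model)
    return model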