renzhc / diffusers_dcu · Commits · ca60ad8e

Commit ca60ad8e (unverified), authored Jan 22, 2025 by Aryan, committed via GitHub on Jan 22, 2025

Improve TorchAO error message (#10627)

improve error message

Parent: beacaa55
Showing 1 changed file with 10 additions and 3 deletions (+10, -3)

src/diffusers/quantizers/quantization_config.py
@@ -481,8 +481,15 @@ class TorchAoConfig(QuantizationConfigMixin):
         TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method()
         if self.quant_type not in TORCHAO_QUANT_TYPE_METHODS.keys():
+            is_floating_quant_type = self.quant_type.startswith("float") or self.quant_type.startswith("fp")
+            if is_floating_quant_type and not self._is_cuda_capability_atleast_8_9():
+                raise ValueError(
+                    f"Requested quantization type: {self.quant_type} is not supported on GPUs with CUDA capability <= 8.9. You "
+                    f"can check the CUDA capability of your GPU using `torch.cuda.get_device_capability()`."
+                )
+
             raise ValueError(
-                f"Requested quantization type: {self.quant_type} is not supported yet or is incorrect. If you think the "
+                f"Requested quantization type: {self.quant_type} is not supported or is an incorrect `quant_type` name. If you think the "
                 f"provided quantization type should be supported, please open an issue at https://github.com/huggingface/diffusers/issues."
             )
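For context, a minimal sketch of how the reworked check surfaces to a caller. It assumes diffusers is installed with torchao support, that this validation runs when the config object is constructed, and that "float8wo_e4m3" is a valid float8 quant type name in the installed torchao build; the name in the first call is deliberately invalid.

from diffusers import TorchAoConfig

# An unrecognized name now fails with the clearer "incorrect `quant_type` name" message.
try:
    TorchAoConfig("not_a_real_quant_type")  # deliberately invalid name
except ValueError as err:
    print(err)  # "... is not supported or is an incorrect `quant_type` name ..."

# Names starting with "float" or "fp" are only registered on GPUs with CUDA
# capability >= 8.9, so on older GPUs they now get the capability-specific
# error instead of the generic one.
try:
    TorchAoConfig("float8wo_e4m3")  # assumption: valid float8 name; raises only on pre-8.9 GPUs
except ValueError as err:
    print(err)  # "... not supported on GPUs with CUDA capability <= 8.9 ..."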
@@ -652,13 +659,13 @@ class TorchAoConfig(QuantizationConfigMixin):
     def __repr__(self):
         r"""
-        Example of how this looks for `TorchAoConfig("uint_a16w4", group_size=32)`:
+        Example of how this looks for `TorchAoConfig("uint4wo", group_size=32)`:

         ```
         TorchAoConfig {
             "modules_to_not_convert": null,
             "quant_method": "torchao",
-            "quant_type": "uint_a16w4",
+            "quant_type": "uint4wo",
             "quant_type_kwargs": {
                 "group_size": 32
             }
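The corrected docstring example can be reproduced directly. A quick sketch, assuming diffusers is installed with torchao support:

from diffusers import TorchAoConfig

# Printing the config emits the JSON-style block shown in the docstring above.
config = TorchAoConfig("uint4wo", group_size=32)
print(config)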