renzhc / diffusers_dcu

Commit 123506ee (unverified)
make parallel loading flag a part of constants. (#12137)

Authored Aug 14, 2025 by Sayak Paul; committed by GitHub on Aug 14, 2025.
Parent: 8c48ec05
Showing 3 changed files with 4 additions and 5 deletions:

  src/diffusers/models/modeling_utils.py   +2 -3
  src/diffusers/utils/__init__.py          +1 -1
  src/diffusers/utils/constants.py         +1 -1
src/diffusers/models/modeling_utils.py

@@ -42,9 +42,8 @@ from ..quantizers import DiffusersAutoQuantizer, DiffusersQuantizer
 from ..quantizers.quantization_config import QuantizationMethod
 from ..utils import (
     CONFIG_NAME,
-    ENV_VARS_TRUE_VALUES,
     FLAX_WEIGHTS_NAME,
-    HF_PARALLEL_LOADING_FLAG,
+    HF_ENABLE_PARALLEL_LOADING,
     SAFE_WEIGHTS_INDEX_NAME,
     SAFETENSORS_WEIGHTS_NAME,
     WEIGHTS_INDEX_NAME,

@@ -962,7 +961,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
         dduf_entries: Optional[Dict[str, DDUFEntry]] = kwargs.pop("dduf_entries", None)
         disable_mmap = kwargs.pop("disable_mmap", False)
-        is_parallel_loading_enabled = os.environ.get(HF_PARALLEL_LOADING_FLAG, "").upper() in ENV_VARS_TRUE_VALUES
+        is_parallel_loading_enabled = HF_ENABLE_PARALLEL_LOADING
         if is_parallel_loading_enabled and not low_cpu_mem_usage:
             raise NotImplementedError("Parallel loading is not supported when not using `low_cpu_mem_usage`.")
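With this change `from_pretrained` no longer parses the environment variable itself; it reads the boolean `HF_ENABLE_PARALLEL_LOADING` that `diffusers.utils` now exports. A minimal usage sketch, assuming the variable is set before diffusers is imported (the model class and repository id below are only illustrative and not part of this commit):

    import os

    # Set the flag before importing diffusers, because the new
    # HF_ENABLE_PARALLEL_LOADING constant is evaluated at import time.
    os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"

    from diffusers import UNet2DConditionModel

    # Parallel loading is only supported together with low_cpu_mem_usage=True
    # (the default when accelerate is installed); otherwise from_pretrained
    # raises the NotImplementedError shown in the hunk above.
    unet = UNet2DConditionModel.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5",
        subfolder="unet",
        low_cpu_mem_usage=True,
    )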
src/diffusers/utils/__init__.py

@@ -25,8 +25,8 @@ from .constants import (
     DIFFUSERS_DYNAMIC_MODULE_NAME,
     FLAX_WEIGHTS_NAME,
     GGUF_FILE_EXTENSION,
+    HF_ENABLE_PARALLEL_LOADING,
     HF_MODULES_CACHE,
-    HF_PARALLEL_LOADING_FLAG,
     HUGGINGFACE_CO_RESOLVE_ENDPOINT,
     MIN_PEFT_VERSION,
     ONNX_EXTERNAL_WEIGHTS_NAME,
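The re-export means the parsed flag can be read from the public utils namespace; a quick check, assuming a diffusers build that includes this commit:

    from diffusers.utils import HF_ENABLE_PARALLEL_LOADING

    # True only if HF_ENABLE_PARALLEL_LOADING was set to a truthy value
    # before diffusers was imported.
    print(HF_ENABLE_PARALLEL_LOADING)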
src/diffusers/utils/constants.py

@@ -44,7 +44,7 @@ DIFFUSERS_REQUEST_TIMEOUT = 60
 DIFFUSERS_ATTN_BACKEND = os.getenv("DIFFUSERS_ATTN_BACKEND", "native")
 DIFFUSERS_ATTN_CHECKS = os.getenv("DIFFUSERS_ATTN_CHECKS", "0") in ENV_VARS_TRUE_VALUES
 DEFAULT_HF_PARALLEL_LOADING_WORKERS = 8
-HF_PARALLEL_LOADING_FLAG = "HF_ENABLE_PARALLEL_LOADING"
+HF_ENABLE_PARALLEL_LOADING = os.environ.get("HF_ENABLE_PARALLEL_LOADING", "").upper() in ENV_VARS_TRUE_VALUES
 # Below should be `True` if the current version of `peft` and `transformers` are compatible with
 # PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are
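The constant now holds the parsed boolean rather than the variable's name, so the os.environ lookup happens once at import time instead of on every `from_pretrained` call. A small sketch of the truthiness rule, assuming ENV_VARS_TRUE_VALUES is the usual Hugging Face set (an assumption; the set itself is not shown in this diff):

    # Assumed contents of ENV_VARS_TRUE_VALUES.
    ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}

    for value in ("1", "yes", "True", "on", "0", "false", ""):
        enabled = value.upper() in ENV_VARS_TRUE_VALUES
        print(f"HF_ENABLE_PARALLEL_LOADING={value!r} -> {'enabled' if enabled else 'disabled'}")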