Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
renzhc
diffusers_dcu
Commits
265840a0
Unverified
Commit
265840a0
authored
Jul 10, 2025
by
Sayak Paul
Committed by
GitHub
Jul 10, 2025
Browse files
[LoRA] fix: disabling hooks when loading loras. (#11896)
fix: disabling hooks when loading loras.
parent
9f4d997d
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
32 additions
and
1 deletion
+32
-1
src/diffusers/loaders/lora_base.py
src/diffusers/loaders/lora_base.py
+1
-1
tests/lora/utils.py
tests/lora/utils.py
+31
-0
No files found.
src/diffusers/loaders/lora_base.py
View file @
265840a0
...
...
@@ -470,7 +470,7 @@ def _func_optionally_disable_offloading(_pipeline):
for
_
,
component
in
_pipeline
.
components
.
items
():
if
not
isinstance
(
component
,
nn
.
Module
)
or
not
hasattr
(
component
,
"_hf_hook"
):
continue
remove_hook_from_module
(
component
,
recurse
=
is_sequential_cpu_offload
)
remove_hook_from_module
(
component
,
recurse
=
is_sequential_cpu_offload
)
return
(
is_model_cpu_offload
,
is_sequential_cpu_offload
,
is_group_offload
)
...
...
tests/lora/utils.py
View file @
265840a0
...
...
@@ -2510,3 +2510,34 @@ class PeftLoraLoaderMixinTests:
# materializes the test methods on invocation which cannot be overridden.
return
self
.
_test_group_offloading_inference_denoiser
(
offload_type
,
use_stream
)
@require_torch_accelerator
def test_lora_loading_model_cpu_offload(self):
    """Loading LoRA weights into a pipeline running under model CPU offload must
    produce the same output as loading them into a non-offloaded pipeline.

    Regression test for the hook-disabling path exercised by ``load_lora_weights``
    when ``enable_model_cpu_offload`` is active.
    """
    # Build a reference (non-offloaded) pipeline and attach a LoRA adapter directly.
    components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0])
    _, _, inputs = self.get_dummy_inputs(with_generator=False)
    pipe = self.pipeline_class(**components)
    pipe = pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)

    denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
    denoiser.add_adapter(denoiser_lora_config)
    self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

    # Reference output with the adapter applied in-place.
    output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

    with tempfile.TemporaryDirectory() as tmpdirname:
        # Serialize the LoRA weights so they can be reloaded below.
        modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
        lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
        self.pipeline_class.save_lora_weights(
            save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts
        )

        # reinitialize the pipeline to mimic the inference workflow.
        components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0])
        pipe = self.pipeline_class(**components)
        # Offloading must be enabled BEFORE loading: this is the code path the
        # commit fixes (hooks are temporarily disabled while loading loras).
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.load_lora_weights(tmpdirname)

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        # NOTE(review): original indentation was lost in extraction; the inference
        # below does not use tmpdirname, so running it inside the context manager
        # is behaviorally equivalent.
        output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0]

    self.assertTrue(np.allclose(output_lora, output_lora_loaded, atol=1e-3, rtol=1e-3))
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment