OpenDAS / diffusers / Commits / aa1f00fd

Unverified commit aa1f00fd, authored Apr 10, 2024 by YiYi Xu, committed by GitHub on Apr 10, 2024.

Fix cpu offload related slow tests (#7618)

* fix

* up

---------

Co-authored-by: yiyixuxu <yixu310@gmail.com>

Parent: d95b9934
Changes: 10 changed files with 12 additions and 145 deletions (+12 / -145)

  +1  -18   src/diffusers/pipelines/deepfloyd_if/pipeline_if.py
  +1  -19   src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py
  +1  -21   src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
  +1  -19   src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py
  +1  -19   src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py
  +1  -19   src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py
  +3  -0    src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py
  +3  -0    src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py
  +0  -15   src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
  +0  -15   src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py
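Note on the pattern of the diffs below: the DeepFloyd IF and Kandinsky 3 pipelines stop defining their own remove_all_hooks helpers and manual unet_offload_hook calls, and instead declare offload behaviour through the model_cpu_offload_seq and _exclude_from_cpu_offload class attributes consumed by DiffusionPipeline.enable_model_cpu_offload(). A minimal sketch of that attribute contract; the ToyPipeline class is hypothetical, only the attribute names are the real ones:

import torch
from diffusers import DiffusionPipeline


class ToyPipeline(DiffusionPipeline):
    # Offload order read by enable_model_cpu_offload(): the text encoder is
    # moved to the accelerator first, then the unet, with each sub-model
    # shuttled back to CPU when the next one runs.
    model_cpu_offload_seq = "text_encoder->unet"

    # Components that never get an offload hook attached; this mirrors the
    # _exclude_from_cpu_offload = ["watermarker"] lines added in this commit.
    _exclude_from_cpu_offload = ["watermarker"]

    def __init__(self, text_encoder, unet, watermarker):
        super().__init__()
        self.register_modules(text_encoder=text_encoder, unet=unet, watermarker=watermarker)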
src/diffusers/pipelines/deepfloyd_if/pipeline_if.py  (+1, -18)

@@ -12,7 +12,6 @@ from ...models import UNet2DConditionModel
 from ...schedulers import DDPMScheduler
 from ...utils import (
     BACKENDS_MAPPING,
-    is_accelerate_available,
     is_bs4_available,
     is_ftfy_available,
     logging,
@@ -115,6 +114,7 @@ class IFPipeline(DiffusionPipeline, LoraLoaderMixin):
     _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
     model_cpu_offload_seq = "text_encoder->unet"
+    _exclude_from_cpu_offload = ["watermarker"]

     def __init__(
         self,
@@ -156,20 +156,6 @@ class IFPipeline(DiffusionPipeline, LoraLoaderMixin):
         )

         self.register_to_config(requires_safety_checker=requires_safety_checker)

-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.safety_checker]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     @torch.no_grad()
     def encode_prompt(
         self,
@@ -335,9 +321,6 @@ class IFPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
-
         return image, nsfw_detected, watermark_detected

     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
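With the watermarker excluded above, the IF pipeline can rely on the stock offloading path instead of managing hooks itself. A hedged usage sketch; the checkpoint id, fp16 variant and prompt are illustrative assumptions, not taken from this commit:

import torch
from diffusers import IFPipeline

# Assumed checkpoint and dtype, shown only to illustrate the offload flow.
pipe = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)

# Walks model_cpu_offload_seq ("text_encoder->unet") and attaches accelerate
# offload hooks, skipping everything listed in _exclude_from_cpu_offload.
pipe.enable_model_cpu_offload()

image = pipe(prompt="a photo of an astronaut riding a green horse").images[0]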
src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py  (+1, -19)

@@ -15,7 +15,6 @@ from ...schedulers import DDPMScheduler
 from ...utils import (
     BACKENDS_MAPPING,
     PIL_INTERPOLATION,
-    is_accelerate_available,
     is_bs4_available,
     is_ftfy_available,
     logging,
@@ -139,6 +138,7 @@ class IFImg2ImgPipeline(DiffusionPipeline, LoraLoaderMixin):
     _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
     model_cpu_offload_seq = "text_encoder->unet"
+    _exclude_from_cpu_offload = ["watermarker"]

     def __init__(
         self,
@@ -180,21 +180,6 @@ class IFImg2ImgPipeline(DiffusionPipeline, LoraLoaderMixin):
         )

         self.register_to_config(requires_safety_checker=requires_safety_checker)

-    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.safety_checker]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     @torch.no_grad()
     def encode_prompt(
         self,
@@ -361,9 +346,6 @@ class IFImg2ImgPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
-
         return image, nsfw_detected, watermark_detected

     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py  (+1, -21)

@@ -16,7 +16,6 @@ from ...schedulers import DDPMScheduler
 from ...utils import (
     BACKENDS_MAPPING,
     PIL_INTERPOLATION,
-    is_accelerate_available,
     is_bs4_available,
     is_ftfy_available,
     logging,
@@ -143,6 +142,7 @@ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
     _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"]
     model_cpu_offload_seq = "text_encoder->unet"
+    _exclude_from_cpu_offload = ["watermarker"]

     def __init__(
         self,
@@ -191,21 +191,6 @@ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
         )

         self.register_to_config(requires_safety_checker=requires_safety_checker)

-    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.safety_checker]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
     def _text_preprocessing(self, text, clean_caption=False):
         if clean_caption and not is_bs4_available():
@@ -513,9 +498,6 @@ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
-
         return image, nsfw_detected, watermark_detected

     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
@@ -1012,8 +994,6 @@ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
         else:
             # 10. Post-processing
             image = (image / 2 + 0.5).clamp(0, 1)
src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py  (+1, -19)

@@ -15,7 +15,6 @@ from ...schedulers import DDPMScheduler
 from ...utils import (
     BACKENDS_MAPPING,
     PIL_INTERPOLATION,
-    is_accelerate_available,
     is_bs4_available,
     is_ftfy_available,
     logging,
@@ -142,6 +141,7 @@ class IFInpaintingPipeline(DiffusionPipeline, LoraLoaderMixin):
     _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
     model_cpu_offload_seq = "text_encoder->unet"
+    _exclude_from_cpu_offload = ["watermarker"]

     def __init__(
         self,
@@ -183,21 +183,6 @@ class IFInpaintingPipeline(DiffusionPipeline, LoraLoaderMixin):
         )

         self.register_to_config(requires_safety_checker=requires_safety_checker)

-    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.safety_checker]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     @torch.no_grad()
     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
     def encode_prompt(
@@ -365,9 +350,6 @@ class IFInpaintingPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
-
         return image, nsfw_detected, watermark_detected

     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py  (+1, -19)

@@ -16,7 +16,6 @@ from ...schedulers import DDPMScheduler
 from ...utils import (
     BACKENDS_MAPPING,
     PIL_INTERPOLATION,
-    is_accelerate_available,
     is_bs4_available,
     is_ftfy_available,
     logging,
@@ -145,6 +144,7 @@ class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
     model_cpu_offload_seq = "text_encoder->unet"
     _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
+    _exclude_from_cpu_offload = ["watermarker"]

     def __init__(
         self,
@@ -193,21 +193,6 @@ class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
         )

         self.register_to_config(requires_safety_checker=requires_safety_checker)

-    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.safety_checker]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
     def _text_preprocessing(self, text, clean_caption=False):
         if clean_caption and not is_bs4_available():
@@ -515,9 +500,6 @@ class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
-
         return image, nsfw_detected, watermark_detected

     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py  (+1, -19)

@@ -15,7 +15,6 @@ from ...models import UNet2DConditionModel
 from ...schedulers import DDPMScheduler
 from ...utils import (
     BACKENDS_MAPPING,
-    is_accelerate_available,
     is_bs4_available,
     is_ftfy_available,
     logging,
@@ -101,6 +100,7 @@ class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
     _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
     model_cpu_offload_seq = "text_encoder->unet"
+    _exclude_from_cpu_offload = ["watermarker"]

     def __init__(
         self,
@@ -149,21 +149,6 @@ class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
         )

         self.register_to_config(requires_safety_checker=requires_safety_checker)

-    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.safety_checker]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
     def _text_preprocessing(self, text, clean_caption=False):
         if clean_caption and not is_bs4_available():
@@ -471,9 +456,6 @@ class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
             nsfw_detected = None
             watermark_detected = None

-            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
-                self.unet_offload_hook.offload()
-
         return image, nsfw_detected, watermark_detected

     # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py  (+3, -0)

@@ -143,6 +143,7 @@ class KandinskyCombinedPipeline(DiffusionPipeline):
     _load_connected_pipes = True
     model_cpu_offload_seq = "text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder"
+    _exclude_from_cpu_offload = ["prior_prior"]

     def __init__(
         self,
@@ -360,6 +361,7 @@ class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
     _load_connected_pipes = True
     model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq"
+    _exclude_from_cpu_offload = ["prior_prior"]

     def __init__(
         self,
@@ -600,6 +602,7 @@ class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
     _load_connected_pipes = True
     model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq"
+    _exclude_from_cpu_offload = ["prior_prior"]

     def __init__(
         self,
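For the Kandinsky combined pipelines the only change is the new _exclude_from_cpu_offload = ["prior_prior"] attribute, so the prior's diffusion model is left out of the offload hooks while the rest of the chain is offloaded. A usage sketch; the checkpoint id and prompt are assumptions for illustration:

import torch
from diffusers import AutoPipelineForText2Image

# Assumed checkpoint id; AutoPipelineForText2Image should resolve it to the
# combined Kandinsky pipeline shown in this file.
pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)

# Hooks follow model_cpu_offload_seq; "prior_prior" is now skipped because of
# the attribute added above.
pipe.enable_model_cpu_offload()

image = pipe("a portrait of a cat wearing a spacesuit, 4k photo").images[0]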
src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py  (+3, -0)

@@ -135,6 +135,7 @@ class KandinskyV22CombinedPipeline(DiffusionPipeline):
     model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq"
     _load_connected_pipes = True
+    _exclude_from_cpu_offload = ["prior_prior"]

     def __init__(
         self,
@@ -362,6 +363,7 @@ class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline):
     model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq"
     _load_connected_pipes = True
+    _exclude_from_cpu_offload = ["prior_prior"]

     def __init__(
         self,
@@ -610,6 +612,7 @@ class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
     model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq"
     _load_connected_pipes = True
+    _exclude_from_cpu_offload = ["prior_prior"]

     def __init__(
         self,
src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py  (+0, -15)

@@ -8,7 +8,6 @@ from ...models import Kandinsky3UNet, VQModel
 from ...schedulers import DDPMScheduler
 from ...utils import (
     deprecate,
-    is_accelerate_available,
     logging,
     replace_example_docstring,
 )
@@ -72,20 +71,6 @@ class Kandinsky3Pipeline(DiffusionPipeline, LoraLoaderMixin):
             tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq
         )

-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet, self.movq]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     def process_embeds(self, embeddings, attention_mask, cut_context):
         if cut_context:
             embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0])
src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py  (+0, -15)

@@ -12,7 +12,6 @@ from ...models import Kandinsky3UNet, VQModel
 from ...schedulers import DDPMScheduler
 from ...utils import (
     deprecate,
-    is_accelerate_available,
     logging,
     replace_example_docstring,
 )
@@ -96,20 +95,6 @@ class Kandinsky3Img2ImgPipeline(DiffusionPipeline, LoraLoaderMixin):
         return timesteps, num_inference_steps - t_start

-    def remove_all_hooks(self):
-        if is_accelerate_available():
-            from accelerate.hooks import remove_hook_from_module
-        else:
-            raise ImportError("Please install accelerate via `pip install accelerate`")
-
-        for model in [self.text_encoder, self.unet]:
-            if model is not None:
-                remove_hook_from_module(model, recurse=True)
-
-        self.unet_offload_hook = None
-        self.text_encoder_offload_hook = None
-        self.final_offload_hook = None
-
     def _process_embeds(self, embeddings, attention_mask, cut_context):
         # return embeddings, attention_mask
         if cut_context:
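For reference, all ten files delete essentially the same copy-pasted helper. A standalone sketch of what it did, built on accelerate's public hook API; strip_offload_hooks and the component list are illustrative, and the base DiffusionPipeline class provides its own remove_all_hooks(), which is presumably why the per-pipeline copies could go:

from accelerate.hooks import remove_hook_from_module


def strip_offload_hooks(pipe):
    # Mirrors the removed methods: detach any accelerate hooks from the
    # sub-models, then drop the cached hook references.
    for name in ("text_encoder", "unet", "movq", "safety_checker"):
        model = getattr(pipe, name, None)
        if model is not None:
            remove_hook_from_module(model, recurse=True)
    pipe.unet_offload_hook = None
    pipe.text_encoder_offload_hook = None
    pipe.final_offload_hook = None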