renzhc / diffusers_dcu · Commits

Commit 92a57a8e (unverified)
Authored Jul 12, 2023 by Patrick von Platen; committed by GitHub, Jul 12, 2023
Parent: d7280b74

Fix kandinsky remove safety (#4065)

* Finish
* make style
Changes: 6 changed files with 81 additions and 9 deletions (+81 −9)

src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py            +0  −3
src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py    +0  −3
src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py    +0  −3
tests/pipelines/kandinsky/test_kandinsky.py                       +27  −0
tests/pipelines/kandinsky/test_kandinsky_img2img.py               +27  −0
tests/pipelines/kandinsky/test_kandinsky_inpaint.py               +27  −0
src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py
@@ -273,9 +273,6 @@ class KandinskyPipeline(DiffusionPipeline):
         for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

-        if self.safety_checker is not None:
-            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
         # We'll offload the last model manually.
         self.final_offload_hook = hook
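The deleted lines drop self.safety_checker from the CPU-offload chain, matching the removal of the safety checker from the Kandinsky pipelines; the same three-line deletion repeats in the img2img and inpaint pipelines below. For context, here is a minimal sketch of the hook-chaining pattern this method uses, assuming accelerate >= 0.17 (which provides cpu_offload_with_hook) and an available CUDA device; the two Linear modules are toy stand-ins for text_encoder, unet, and movq:

import torch
from accelerate import cpu_offload_with_hook

device = torch.device("cuda")
encoder = torch.nn.Linear(4, 4)  # toy stand-in for a pipeline sub-model
decoder = torch.nn.Linear(4, 4)

# Chain the hooks: each wrapped module is moved to the GPU just before its
# forward pass, and prev_module_hook evicts the *previous* module back to
# CPU at that moment, so at most one sub-model occupies GPU memory.
hook = None
for model in [encoder, decoder]:
    _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook)

out = decoder(encoder(torch.randn(1, 4)))

# The last module has no successor to evict it, hence the manual offload
# via the hook the pipeline stores as final_offload_hook.
hook.offload()

With the safety_checker branch gone, the chain is one link shorter and the final hook now comes from movq.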
src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py
@@ -308,9 +308,6 @@ class KandinskyImg2ImgPipeline(DiffusionPipeline):
         for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

-        if self.safety_checker is not None:
-            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
         # We'll offload the last model manually.
         self.final_offload_hook = hook
src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py
@@ -433,9 +433,6 @@ class KandinskyInpaintPipeline(DiffusionPipeline):
         for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

-        if self.safety_checker is not None:
-            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
         # We'll offload the last model manually.
         self.final_offload_hook = hook
tests/pipelines/kandinsky/test_kandinsky.py
@@ -230,6 +230,33 @@ class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
         ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

+    @require_torch_gpu
+    def test_offloads(self):
+        pipes = []
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components).to(torch_device)
+        pipes.append(sd_pipe)
+
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components)
+        sd_pipe.enable_model_cpu_offload()
+        pipes.append(sd_pipe)
+
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components)
+        sd_pipe.enable_sequential_cpu_offload()
+        pipes.append(sd_pipe)
+
+        image_slices = []
+        for pipe in pipes:
+            inputs = self.get_dummy_inputs(torch_device)
+
+            image = pipe(**inputs).images
+
+            image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

 @slow
 @require_torch_gpu
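The new test_offloads constructs the same pipeline three ways — plain .to(torch_device), enable_model_cpu_offload(), and enable_sequential_cpu_offload() — and asserts that a corner slice of the output agrees across all three to within 1e-3, i.e. that offloading changes memory behaviour but not results. As a usage sketch of the two offload modes, assuming a CUDA machine and the kandinsky-community/kandinsky-2-1 checkpoint (an illustrative choice, not part of this diff):

import torch
from diffusers import KandinskyPipeline

pipe = KandinskyPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)

# Whole-model offload: text_encoder, unet, and movq visit the GPU one at a
# time via the chained hooks shown above. Small speed cost, big memory win.
pipe.enable_model_cpu_offload()

# Alternatively, sequential offload moves weights at a finer granularity:
# the lowest memory footprint, but markedly slower.
# pipe.enable_sequential_cpu_offload()

The 1e-3 tolerance leaves room for the small numerical drift that repeated device transfers can introduce.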
tests/pipelines/kandinsky/test_kandinsky_img2img.py
@@ -242,6 +242,33 @@ class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
         ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

+    @require_torch_gpu
+    def test_offloads(self):
+        pipes = []
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components).to(torch_device)
+        pipes.append(sd_pipe)
+
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components)
+        sd_pipe.enable_model_cpu_offload()
+        pipes.append(sd_pipe)
+
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components)
+        sd_pipe.enable_sequential_cpu_offload()
+        pipes.append(sd_pipe)
+
+        image_slices = []
+        for pipe in pipes:
+            inputs = self.get_dummy_inputs(torch_device)
+
+            image = pipe(**inputs).images
+
+            image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

 @slow
 @require_torch_gpu
tests/pipelines/kandinsky/test_kandinsky_inpaint.py
@@ -250,6 +250,33 @@ class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)

+    @require_torch_gpu
+    def test_offloads(self):
+        pipes = []
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components).to(torch_device)
+        pipes.append(sd_pipe)
+
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components)
+        sd_pipe.enable_model_cpu_offload()
+        pipes.append(sd_pipe)
+
+        components = self.get_dummy_components()
+        sd_pipe = self.pipeline_class(**components)
+        sd_pipe.enable_sequential_cpu_offload()
+        pipes.append(sd_pipe)
+
+        image_slices = []
+        for pipe in pipes:
+            inputs = self.get_dummy_inputs(torch_device)
+
+            image = pipe(**inputs).images
+
+            image_slices.append(image[0, -3:, -3:, -1].flatten())
+
+        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
+        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

 @slow
 @require_torch_gpu
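All three tests compare only image[0, -3:, -3:, -1] — the bottom-right 3x3 patch of the last channel — rather than whole images. A tiny self-contained illustration of that check, with synthetic arrays standing in for pipeline outputs:

import numpy as np

# Fake outputs of shape (batch, height, width, channels); the second
# simulates an offloaded run with a little numerical drift.
image_a = np.random.RandomState(0).rand(1, 64, 64, 3)
image_b = image_a + 1e-4

# Bottom-right 3x3 patch of the last channel of the first image: a cheap
# probe that still catches real divergence between runs.
slice_a = image_a[0, -3:, -3:, -1].flatten()
slice_b = image_b[0, -3:, -3:, -1].flatten()

assert np.abs(slice_a - slice_b).max() < 1e-3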