renzhc / diffusers_dcu · Commits · 42beaf1d

Commit 42beaf1d (unverified)
Authored Feb 27, 2023 by Will Berman; committed by GitHub on Feb 27, 2023
Parent: 824cb538

move pipeline based test skips out of pipeline mixin (#2486)

Showing 7 changed files with 88 additions and 61 deletions (+88, -61)
tests/pipelines/dance_diffusion/test_dance_diffusion.py                  +17   -1
tests/pipelines/repaint/test_repaint.py                                  +23   -1
tests/pipelines/stable_diffusion/test_cycle_diffusion.py                 +21   -1
tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py        +17   -1
tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py    +5   -0
tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py         +4   -0
tests/test_pipelines_common.py                                            +1  -57
tests/pipelines/dance_diffusion/test_dance_diffusion.py

@@ -21,7 +21,7 @@ import torch
 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin

@@ -87,6 +87,22 @@ class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
         assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
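The overridden tests above rely on the skip_mps decorator that this commit starts importing from diffusers.utils.testing_utils. Its implementation is not part of this diff; the following is only a minimal sketch of what such a decorator could look like, assuming it keys off the same torch_device value the test suite already imports from diffusers.utils:

import unittest

from diffusers.utils import torch_device  # resolves to "cuda", "mps", or "cpu" in the test suite


def skip_mps(test_case):
    # Sketch only, not the library's exact code: skip the wrapped test whenever
    # the selected backend is Apple's MPS.
    return unittest.skipIf(torch_device == "mps", "test requires a non-MPS device")(test_case)

Applied to an overridden method, the decorator leaves the mixin's generic implementation untouched while scoping the skip to the one pipeline test class that needs it.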
tests/pipelines/repaint/test_repaint.py

@@ -20,7 +20,7 @@ import numpy as np
 import torch

 from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
-from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device

 from ...test_pipelines_common import PipelineTesterMixin

@@ -84,6 +84,28 @@ class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    # RePaint can hardly be made deterministic since the scheduler is currently always
+    # nondeterministic
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @nightly
 @require_torch_gpu
tests/pipelines/stable_diffusion/test_cycle_diffusion.py

@@ -23,7 +23,7 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin

@@ -149,6 +149,26 @@ class CycleDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py

@@ -31,7 +31,7 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin

@@ -213,6 +213,22 @@ class StableDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.Test
         assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3)

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py

@@ -223,6 +223,11 @@ class StableDiffusionPix2PixZeroPipelineFastTests(PipelineTesterMixin, unittest.
         assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3)

+    # Non-determinism caused by the scheduler optimizing the latent inputs during inference
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+

 @slow
 @require_torch_gpu
tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py

@@ -382,6 +382,10 @@ class StableDiffusionDepth2ImgPipelineFastTests(PipelineTesterMixin, unittest.Te
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
tests/test_pipelines_common.py

@@ -11,14 +11,7 @@ import numpy as np
 import torch

 import diffusers
-from diffusers import (
-    CycleDiffusionPipeline,
-    DanceDiffusionPipeline,
-    DiffusionPipeline,
-    RePaintPipeline,
-    StableDiffusionDepth2ImgPipeline,
-    StableDiffusionImg2ImgPipeline,
-)
+from diffusers import DiffusionPipeline
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available
 from diffusers.utils.testing_utils import require_torch, torch_device

@@ -83,15 +76,6 @@ class PipelineTesterMixin:
             torch.cuda.empty_cache()

     def test_save_load_local(self):
-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)

@@ -199,18 +183,6 @@ class PipelineTesterMixin:
     def _test_inference_batch_single_identical(
         self, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False
     ):
-        if self.pipeline_class.__name__ in [
-            "CycleDiffusionPipeline",
-            "RePaintPipeline",
-            "StableDiffusionPix2PixZeroPipeline",
-        ]:
-            # RePaint can hardly be made deterministic since the scheduler is currently always
-            # nondeterministic
-            # CycleDiffusion is also slightly nondeterministic
-            # There's a training loop inside Pix2PixZero and is guided by edit directions. This is
-            # why the slight non-determinism.
-            return
-
         if test_max_difference is None:
             # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems
             # make sure that batched and non-batched is identical

@@ -283,15 +255,6 @@ class PipelineTesterMixin:
             assert_mean_pixel_difference(output_batch[0][0], output[0][0])

     def test_dict_tuple_outputs_equivalent(self):
-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)

@@ -370,15 +333,6 @@ class PipelineTesterMixin:
         if not hasattr(self.pipeline_class, "_optional_components"):
             return

-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)

@@ -440,16 +394,6 @@ class PipelineTesterMixin:
         if not self.test_attention_slicing:
             return

-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-            StableDiffusionDepth2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
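With the hard-coded torch_device == "mps" branches removed, PipelineTesterMixin no longer needs to import any concrete pipeline classes; each pipeline's test class now opts out of individual tests by overriding them, as shown in the files above. A small, self-contained demonstration of why that pattern works with unittest (hypothetical class names, not code from the repository):

import unittest

SIMULATED_DEVICE = "mps"  # stand-in for diffusers' torch_device


class DummyPipelineTesterMixin:
    # Generic check shared by every pipeline test class (placeholder body).
    def test_save_load_local(self):
        self.assertTrue(True)


class DummyPipelineFastTests(DummyPipelineTesterMixin, unittest.TestCase):
    # The override is what unittest discovers, so the skip applies only to this class;
    # other subclasses of the mixin keep running the generic test unchanged.
    @unittest.skipIf(SIMULATED_DEVICE == "mps", "inconsistent outputs on MPS")
    def test_save_load_local(self):
        return super().test_save_load_local()


if __name__ == "__main__":
    unittest.main()  # reports the test as skipped on the simulated MPS device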