Unverified Commit 42beaf1d authored by Will Berman, committed by GitHub

move pipeline based test skips out of pipeline mixin (#2486)

parent 824cb538
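
This change deletes the hard-coded, pipeline-specific skip checks from PipelineTesterMixin and moves each skip onto the affected pipeline's own test class, expressed as a decorated override that delegates back to the mixin. A schematic sketch of the before/after pattern (MyPipelineFastTests is a hypothetical name; the real classes appear in the diff below):

import unittest

from diffusers.utils.testing_utils import skip_mps
from ...test_pipelines_common import PipelineTesterMixin

# Before (schematic): the shared mixin special-cased pipelines and silently early-returned:
#
#     def test_save_load_local(self):
#         if torch_device == "mps" and self.pipeline_class in (DanceDiffusionPipeline, ...):
#             # FIXME: inconsistent outputs on MPS
#             return

# After: the mixin runs every test unconditionally, and a pipeline that cannot pass on a
# given backend opts out locally by overriding the test with a skip decorator:
class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

With a decorator-based skip, the test runner reports the test as skipped with a reason, instead of the early return counting as a pass.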
@@ -21,7 +21,7 @@ import torch
 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin
@@ -87,6 +87,22 @@ class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
         assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
...
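The skip_mps decorator added to the imports above lives in diffusers.utils.testing_utils. As a rough sketch of the behavior it provides (an assumption about the helper, not necessarily the library's exact implementation), it conditionally skips a test based on the configured test device:

import unittest

from diffusers.utils import torch_device  # resolves to "mps" on Apple Silicon test runs

def skip_mps(test_case):
    # Hypothetical sketch: skip the decorated test whenever the test device is MPS.
    return unittest.skipUnless(torch_device != "mps", "test requires non-'mps' device")(test_case)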
@@ -20,7 +20,7 @@ import numpy as np
 import torch

 from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
-from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device

 from ...test_pipelines_common import PipelineTesterMixin
@@ -84,6 +84,28 @@ class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    # RePaint can hardly be made deterministic since the scheduler is currently always
+    # nondeterministic
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @nightly
 @require_torch_gpu
...
@@ -23,7 +23,7 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin
@@ -149,6 +149,26 @@ class CycleDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
...
@@ -31,7 +31,7 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin
@@ -213,6 +213,22 @@ class StableDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3)

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
...
@@ -223,6 +223,11 @@ class StableDiffusionPix2PixZeroPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3)

+    # Non-determinism caused by the scheduler optimizing the latent inputs during inference
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+

 @slow
 @require_torch_gpu
...
@@ -382,6 +382,10 @@ class StableDiffusionDepth2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
...
@@ -11,14 +11,7 @@ import numpy as np
 import torch

 import diffusers
-from diffusers import (
-    CycleDiffusionPipeline,
-    DanceDiffusionPipeline,
-    DiffusionPipeline,
-    RePaintPipeline,
-    StableDiffusionDepth2ImgPipeline,
-    StableDiffusionImg2ImgPipeline,
-)
+from diffusers import DiffusionPipeline
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available
 from diffusers.utils.testing_utils import require_torch, torch_device
@@ -83,15 +76,6 @@ class PipelineTesterMixin:
         torch.cuda.empty_cache()

     def test_save_load_local(self):
-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
@@ -199,18 +183,6 @@ class PipelineTesterMixin:
     def _test_inference_batch_single_identical(
         self, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False
     ):
-        if self.pipeline_class.__name__ in [
-            "CycleDiffusionPipeline",
-            "RePaintPipeline",
-            "StableDiffusionPix2PixZeroPipeline",
-        ]:
-            # RePaint can hardly be made deterministic since the scheduler is currently always
-            # nondeterministic
-            # CycleDiffusion is also slightly nondeterministic
-            # There's a training loop inside Pix2PixZero and is guided by edit directions. This is
-            # why the slight non-determinism.
-            return
-
         if test_max_difference is None:
             # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems
             # make sure that batched and non-batched is identical
@@ -283,15 +255,6 @@ class PipelineTesterMixin:
         assert_mean_pixel_difference(output_batch[0][0], output[0][0])

     def test_dict_tuple_outputs_equivalent(self):
-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
@@ -370,15 +333,6 @@ class PipelineTesterMixin:
         if not hasattr(self.pipeline_class, "_optional_components"):
             return

-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
@@ -440,16 +394,6 @@ class PipelineTesterMixin:
         if not self.test_attention_slicing:
             return

-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-            StableDiffusionDepth2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
...
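A practical consequence of this refactor: the old in-mixin checks returned early, so on MPS the affected tests were reported as passing even though nothing ran; with unittest.skip and skip_mps they now show up as skipped, together with the reason. Assuming the standard diffusers test layout, this can be verified locally with something like:

python -m pytest tests/pipelines -k "save_load_local" -rs

where pytest's -rs flag prints the reason for each skipped test.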