Unverified Commit 2c82e0c4 authored by Anton Lozhkov, committed by GitHub

Reorganize pipeline tests (#963)

* Reorganize pipeline tests

* fix vq
parent 2d35f673
@@ -30,13 +30,16 @@ from diffusers import (
     VQModel,
 )
 from diffusers.utils import floats_tensor, load_image, slow, torch_device
+from diffusers.utils.testing_utils import require_torch_gpu
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

+from ...test_pipelines_common import PipelineTesterMixin
+
 torch.backends.cuda.matmul.allow_tf32 = False


-class PipelineFastTests(unittest.TestCase):
+class StableDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -461,8 +464,8 @@ class PipelineFastTests(unittest.TestCase):


 @slow
-@unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
-class PipelineIntegrationTests(unittest.TestCase):
+@require_torch_gpu
+class StableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
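Note: the hunks above replace the ad-hoc @unittest.skipIf(torch_device == "cpu", ...) guard with the shared @require_torch_gpu decorator. As a rough mental model only (the actual implementation lives in diffusers.utils.testing_utils and may differ), such a decorator is a thin wrapper over unittest's skip machinery:

import unittest

import torch


def require_torch_gpu(test_case):
    # Skip the decorated test function or test class unless a CUDA device is available.
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)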
@@ -29,14 +29,17 @@ from diffusers import (
     VQModel,
 )
 from diffusers.utils import floats_tensor, load_image, slow, torch_device
+from diffusers.utils.testing_utils import require_torch_gpu
 from PIL import Image
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

+from ...test_pipelines_common import PipelineTesterMixin
+
 torch.backends.cuda.matmul.allow_tf32 = False


-class PipelineFastTests(unittest.TestCase):
+class StableDiffusionInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -258,8 +261,8 @@ class PipelineFastTests(unittest.TestCase):


 @slow
-@unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
-class PipelineIntegrationTests(unittest.TestCase):
+@require_torch_gpu
+class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -31,14 +31,17 @@ from diffusers import (
     VQModel,
 )
 from diffusers.utils import floats_tensor, load_image, slow, torch_device
+from diffusers.utils.testing_utils import require_torch_gpu
 from PIL import Image
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

+from ...test_pipelines_common import PipelineTesterMixin
+
 torch.backends.cuda.matmul.allow_tf32 = False


-class PipelineFastTests(unittest.TestCase):
+class StableDiffusionInpaintLegacyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
@@ -338,8 +341,8 @@ class PipelineFastTests(unittest.TestCase):


 @slow
-@unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
-class PipelineIntegrationTests(unittest.TestCase):
+@require_torch_gpu
+class StableDiffusionInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
This diff is collapsed.
from diffusers.utils.testing_utils import require_torch


@require_torch
class PipelineTesterMixin:
    """
    This mixin is designed to be used with unittest.TestCase classes.
    It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline,
    equivalence of dict and tuple outputs, etc.
    """

    pass
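The mixin ships empty for now (hence the bare pass); test classes opt in by inheriting it ahead of unittest.TestCase, exactly as the renamed classes in the hunks above do. A minimal, self-contained usage sketch, where MyPipelineFastTests and test_smoke are illustrative names rather than part of this commit:

import unittest


class PipelineTesterMixin:
    # Stub standing in for tests/test_pipelines_common.PipelineTesterMixin.
    pass


class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Pipeline-specific tests live here; any shared tests inherited from the
    # mixin are collected and run by unittest alongside them.
    def test_smoke(self):
        self.assertTrue(True)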
@@ -30,8 +30,8 @@ if is_flax_available():
     from jax import pmap


-@require_flax
 @slow
+@require_flax
 class FlaxPipelineTests(unittest.TestCase):
     def test_dummy_all_tpus(self):
         pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxPipelineTesterMixin:
    """
    This mixin is designed to be used with unittest.TestCase classes.
    It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline,
    equivalence of dict and tuple outputs, etc.
    """

    pass
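Neither mixin defines shared tests yet; their docstrings sketch what will land here (save/load round-trips, dict vs. tuple output equivalence). A hypothetical example of one such check, where get_pipeline and get_inputs are invented per-subclass hooks and not part of this commit:

import numpy as np


class PipelineTesterMixin:
    def test_dict_tuple_outputs_equivalent(self):
        # Hypothetical hooks: each subclass would build its pipeline and a
        # deterministic set of call arguments (fixed seed, "np" output type).
        pipe = self.get_pipeline()
        images_dict = pipe(**self.get_inputs(), return_dict=True).images
        images_tuple = pipe(**self.get_inputs(), return_dict=False)[0]
        assert np.array_equal(images_dict, images_tuple)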