Unverified commit 3a9d7d97, authored by Patrick von Platen and committed by GitHub

[Tests] parallelize (#3078)



* [Tests] parallelize

* finish folder structuring

* Parallelize tests more

* Correct saving of pipelines

* make sure logging level is correct

* try again

* Apply suggestions from code review
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>

---------
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>
parent e748b3c6
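
The changes below restructure the test tree: the shared pipeline helpers (pipeline_params, test_pipelines_common, test_pipelines_onnx_common) end up one package level closer to the pipeline test modules, and test_utils.py moves under tests/others/, which is why every hunk rewrites `from ...` imports to `from ..`. The CI invocation itself is not part of this excerpt; the snippet below is only a minimal sketch, assuming the common pytest-xdist setup, of how a restructured suite like this can be run in parallel.

import pytest

# Assumption: pytest-xdist is installed. "-n auto" starts one worker per CPU core
# and distributes the collected tests across workers; the path mirrors the folder
# layout visible in the hunks below.
if __name__ == "__main__":
    raise SystemExit(pytest.main(["-n", "auto", "tests/pipelines"]))
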
@@ -167,4 +167,4 @@ class DeprecateTester(unittest.TestCase):
         with self.assertWarns(FutureWarning) as warning:
             deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False)
         assert str(warning.warning) == "This message is better!!!"
-        assert "diffusers/tests/test_utils.py" in warning.filename
+        assert "diffusers/tests/others/test_utils.py" in warning.filename
@@ -28,8 +28,8 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
 from diffusers.utils import slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
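
Every pipeline-test hunk in this commit makes the same change shown above for the Alt Diffusion tests: the shared helpers are imported from two levels up instead of three. A minimal sketch of a relocated fast-test module after the change (the class name is hypothetical, and the parameter sets are just the text-to-image ones used above):

import unittest

# Shared helpers now live directly under tests/pipelines/, one package level up
# from each pipeline's own test directory, hence two dots instead of three.
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


class ExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Illustrative only: a real test module also sets pipeline_class and implements
    # get_dummy_components() / get_dummy_inputs() as the modules in this diff do.
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
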
@@ -38,8 +38,8 @@ from diffusers import (
 )
 from diffusers.utils import slow, torch_device
-from ...pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
...
@@ -23,8 +23,8 @@ from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
 from diffusers.utils import slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
-from ...pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -21,8 +21,8 @@ import torch
 from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
 from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device
-from ...pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -23,11 +23,11 @@ from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultis
 from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from ...pipeline_params import (
+from ..pipeline_params import (
     CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
     CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
 )
-from ...test_pipelines_common import PipelineTesterMixin
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -23,8 +23,8 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
 from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, slow, torch_device
-from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -27,8 +27,8 @@ from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
 from diffusers.utils import floats_tensor, load_image, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from ...pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -22,8 +22,8 @@ import torch
 from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
 from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device
-from ...pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -24,8 +24,8 @@ from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, Sp
 from diffusers.utils import require_torch_gpu, skip_mps, slow, torch_device
 from diffusers.utils.testing_utils import require_note_seq, require_onnxruntime
-from ...pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -25,8 +25,8 @@ from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
-from ...pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -29,7 +29,7 @@ from diffusers import (
 )
 from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
-from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
+from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
 if is_onnx_available():
...
@@ -35,7 +35,7 @@ from diffusers.utils.testing_utils import (
     require_torch_gpu,
 )
-from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
+from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
 if is_onnx_available():
...
@@ -26,7 +26,7 @@ from diffusers.utils.testing_utils import (
     require_torch_gpu,
 )
-from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
+from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
 if is_onnx_available():
...
@@ -36,7 +36,7 @@ from diffusers.utils.testing_utils import (
     require_torch_gpu,
 )
-from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
+from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
 if is_onnx_available():
...
@@ -40,8 +40,8 @@ from diffusers.utils import load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu
 from ...models.test_models_unet_2d_condition import create_lora_layers
-from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
...
@@ -33,8 +33,8 @@ from diffusers.utils import load_image, load_numpy, randn_tensor, slow, torch_de
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import require_torch_gpu
-from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ...test_pipelines_common import PipelineTesterMixin
+from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
+from ..test_pipelines_common import PipelineTesterMixin
 class StableDiffusionControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
...