"src/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "c806f2fad6040d50b3d291076cab0195863ba328"
Unverified Commit 3a9d7d97 authored by Patrick von Platen, committed by GitHub

[Tests] parallelize (#3078)



* [Tests] parallelize

* finish folder structuring

* Parallelize tests more

* Correct saving of pipelines

* make sure logging level is correct

* try again

* Apply suggestions from code review
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>

---------
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>
parent e748b3c6
@@ -21,22 +21,27 @@ jobs:
       fail-fast: false
       matrix:
         config:
-          - name: Fast PyTorch CPU tests on Ubuntu
-            framework: pytorch
+          - name: Fast PyTorch Pipeline CPU tests
+            framework: pytorch_pipelines
             runner: docker-cpu
             image: diffusers/diffusers-pytorch-cpu
-            report: torch_cpu
-          - name: Fast Flax CPU tests on Ubuntu
+            report: torch_cpu_pipelines
+          - name: Fast PyTorch Models & Schedulers CPU tests
+            framework: pytorch_models
+            runner: docker-cpu
+            image: diffusers/diffusers-pytorch-cpu
+            report: torch_cpu_models_schedulers
+          - name: Fast Flax CPU tests
             framework: flax
             runner: docker-cpu
             image: diffusers/diffusers-flax-cpu
             report: flax_cpu
-          - name: Fast ONNXRuntime CPU tests on Ubuntu
+          - name: Fast ONNXRuntime CPU tests
             framework: onnxruntime
             runner: docker-cpu
             image: diffusers/diffusers-onnxruntime-cpu
             report: onnx_cpu
-          - name: PyTorch Example CPU tests on Ubuntu
+          - name: PyTorch Example CPU tests
             framework: pytorch_examples
             runner: docker-cpu
             image: diffusers/diffusers-pytorch-cpu
@@ -71,13 +76,21 @@ jobs:
         run: |
           python utils/print_env.py
 
-      - name: Run fast PyTorch CPU tests
-        if: ${{ matrix.config.framework == 'pytorch' }}
+      - name: Run fast PyTorch Pipeline CPU tests
+        if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
         run: |
           python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_${{ matrix.config.report }} \
-            tests/
+            tests/pipelines
+
+      - name: Run fast PyTorch Model Scheduler CPU tests
+        if: ${{ matrix.config.framework == 'pytorch_models' }}
+        run: |
+          python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
+            -s -v -k "not Flax and not Onnx" \
+            --make-reports=tests_${{ matrix.config.report }} \
+            tests/models tests/schedulers tests/others
 
       - name: Run fast Flax TPU tests
         if: ${{ matrix.config.framework == 'flax' }}
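
Note on the pytest flags used above: "-n 2" comes from pytest-xdist and runs two worker processes, "--dist=loadfile" sends all tests from a given file to the same worker, and "--max-worker-restart=0" makes the job fail rather than silently restart a crashed worker. A minimal sketch of an equivalent local run, assuming pytest and pytest-xdist are installed ("--make-reports" is a custom option from this repository's conftest and is left out here):

    import pytest

    # Run the fast pipeline tests on two xdist workers, grouping by file.
    exit_code = pytest.main([
        "-n", "2",                    # two worker processes (pytest-xdist)
        "--dist", "loadfile",         # all tests in a file share one worker
        "--max-worker-restart", "0",  # fail instead of restarting workers
        "-k", "not Flax and not Onnx",
        "tests/pipelines",
    ])
    raise SystemExit(exit_code)
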
@@ -85,7 +98,7 @@ jobs:
           python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "Flax" \
             --make-reports=tests_${{ matrix.config.report }} \
-            tests/
+            tests
 
       - name: Run fast ONNXRuntime CPU tests
         if: ${{ matrix.config.framework == 'onnxruntime' }}
......
@@ -20,7 +20,7 @@ import torch
 from diffusers import UNet1DModel
 from diffusers.utils import floats_tensor, slow, torch_device
-from ..test_modeling_common import ModelTesterMixin
+from .test_modeling_common import ModelTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
......
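
The import fixes in this and the following files follow from the commit's folder restructuring: the shared ModelTesterMixin now sits in the same package as the model tests, so the parent-package import (two dots) becomes a same-package import (one dot). A hypothetical sketch of the layout this implies; the sibling file name is an assumption, only test_modeling_common is named in the diff:

    # tests/models/test_modeling_common.py   defines ModelTesterMixin
    # tests/models/test_unet_1d.py           a sibling test module (assumed name)

    # Inside a sibling module, one dot resolves within tests/models:
    from .test_modeling_common import ModelTesterMixin

    # Before the move the mixin lived one package up, hence the old form:
    # from ..test_modeling_common import ModelTesterMixin
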
@@ -22,7 +22,7 @@ import torch
 from diffusers import UNet2DModel
 from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
-from ..test_modeling_common import ModelTesterMixin
+from .test_modeling_common import ModelTesterMixin
 logger = logging.get_logger(__name__)
......
@@ -34,7 +34,7 @@ from diffusers.utils import (
 )
 from diffusers.utils.import_utils import is_xformers_available
-from ..test_modeling_common import ModelTesterMixin
+from .test_modeling_common import ModelTesterMixin
 logger = logging.get_logger(__name__)
......
@@ -30,7 +30,7 @@ from diffusers.utils import (
 )
 from diffusers.utils.import_utils import is_xformers_available
-from ..test_modeling_common import ModelTesterMixin
+from .test_modeling_common import ModelTesterMixin
 logger = logging.get_logger(__name__)
......
@@ -22,7 +22,7 @@ from parameterized import parameterized
 from diffusers import AutoencoderKL
 from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
-from ..test_modeling_common import ModelTesterMixin
+from .test_modeling_common import ModelTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
......
@@ -4,7 +4,7 @@ from diffusers import FlaxAutoencoderKL
 from diffusers.utils import is_flax_available
 from diffusers.utils.testing_utils import require_flax
-from ..test_modeling_common_flax import FlaxModelTesterMixin
+from .test_modeling_common_flax import FlaxModelTesterMixin
 if is_flax_available():
......
@@ -20,7 +20,7 @@ import torch
 from diffusers import VQModel
 from diffusers.utils import floats_tensor, torch_device
-from ..test_modeling_common import ModelTesterMixin
+from .test_modeling_common import ModelTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
......
@@ -141,6 +141,8 @@ class ConfigTester(unittest.TestCase):
     def test_load_ddim_from_pndm(self):
         logger = logging.get_logger("diffusers.configuration_utils")
+        # 30 for warning
+        logger.setLevel(30)
 
         with CaptureLogger(logger) as cap_logger:
             ddim = DDIMScheduler.from_pretrained(
@@ -153,6 +155,8 @@ class ConfigTester(unittest.TestCase):
     def test_load_euler_from_pndm(self):
         logger = logging.get_logger("diffusers.configuration_utils")
+        # 30 for warning
+        logger.setLevel(30)
 
         with CaptureLogger(logger) as cap_logger:
             euler = EulerDiscreteScheduler.from_pretrained(
@@ -165,6 +169,8 @@ class ConfigTester(unittest.TestCase):
     def test_load_euler_ancestral_from_pndm(self):
         logger = logging.get_logger("diffusers.configuration_utils")
+        # 30 for warning
+        logger.setLevel(30)
 
         with CaptureLogger(logger) as cap_logger:
             euler = EulerAncestralDiscreteScheduler.from_pretrained(
@@ -177,6 +183,8 @@ class ConfigTester(unittest.TestCase):
     def test_load_pndm(self):
         logger = logging.get_logger("diffusers.configuration_utils")
+        # 30 for warning
+        logger.setLevel(30)
 
         with CaptureLogger(logger) as cap_logger:
             pndm = PNDMScheduler.from_pretrained(
@@ -189,6 +197,8 @@ class ConfigTester(unittest.TestCase):
     def test_overwrite_config_on_load(self):
         logger = logging.get_logger("diffusers.configuration_utils")
+        # 30 for warning
+        logger.setLevel(30)
 
         with CaptureLogger(logger) as cap_logger:
             ddpm = DDPMScheduler.from_pretrained(
@@ -212,6 +222,8 @@ class ConfigTester(unittest.TestCase):
     def test_load_dpmsolver(self):
         logger = logging.get_logger("diffusers.configuration_utils")
+        # 30 for warning
+        logger.setLevel(30)
 
         with CaptureLogger(logger) as cap_logger:
             dpm = DPMSolverMultistepScheduler.from_pretrained(
......
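
The repeated "logger.setLevel(30)" additions pin the captured logger to the WARNING threshold (logging.WARNING == 30) before each capture, matching the commit message's "make sure logging level is correct": with the suite now split across parallel workers, an earlier test may have left the logger at a higher level, which would suppress the warning the assertion expects. A standalone sketch of the mechanism, using only the standard library (CaptureLogger itself is a diffusers test helper):

    import logging

    logger = logging.getLogger("diffusers.configuration_utils")
    assert logging.WARNING == 30      # the literal used in the diff

    logger.setLevel(logging.ERROR)    # e.g. left over from an earlier test
    logger.setLevel(30)               # reset: WARNING records pass again

    logging.basicConfig()
    logger.warning("this warning is emitted, not suppressed")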