Unverified Commit 0f0b5318 authored by Dhruv Nair, committed by GitHub

Add decorator for compile tests (#8703)



* update

* update

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
parent e8284281
@@ -330,6 +330,7 @@ jobs:
       - name: Run example tests on GPU
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          RUN_COMPILE: yes
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
       - name: Failure short reports
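The job now exports RUN_COMPILE so that tests gated behind the new decorator (added below) actually run in this workflow. For context, reading such a yes/no environment flag into a boolean looks roughly like the following; this is a minimal sketch, not the exact body of diffusers' parse_flag_from_env helper, and parse_bool_flag is a hypothetical name used only for illustration.

import os

_TRUTHY = {"1", "true", "t", "yes", "y"}
_FALSY = {"0", "false", "f", "no", "n"}


def parse_bool_flag(key: str, default: bool = False) -> bool:
    """Sketch of an env-flag parser in the spirit of parse_flag_from_env."""
    value = os.environ.get(key)
    if value is None:
        # Flag not set: fall back to the default (compile tests stay skipped).
        return default
    if value.lower() in _TRUTHY:
        return True
    if value.lower() in _FALSY:
        return False
    raise ValueError(f"If set, {key} must be yes or no, got {value!r}.")


# Mirrors the module-level flag added in testing_utils.py below.
_run_compile_tests = parse_bool_flag("RUN_COMPILE", default=False)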
@@ -187,6 +187,7 @@ def parse_flag_from_env(key, default=False):
 
 _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
 _run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
+_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False)
 
 
 def floats_tensor(shape, scale=1.0, rng=None, name=None):
@@ -225,6 +226,16 @@ def nightly(test_case):
     return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
 
 
+def is_torch_compile(test_case):
+    """
+    Decorator marking a test that runs compile tests in the diffusers CI.
+
+    Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them.
+    """
+    return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)
+
+
 def require_torch(test_case):
     """
     Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
@@ -390,14 +401,6 @@ def get_python_version():
     return major, minor
 
 
-def require_python39_or_higher(test_case):
-    def python39_available():
-        major, minor = get_python_version()
-
-        return major == 3 and minor >= 9
-
-    return unittest.skipUnless(python39_available(), "test requires Python 3.9 or higher")(test_case)
-
-
 def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
     if isinstance(arry, str):
         if local_path is not None:
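With the flag and decorator in place, compile tests become opt-in: they stay skipped unless RUN_COMPILE is truthy, independently of the other requirement decorators they are stacked with. A minimal sketch of how a test would use the new decorator (the test class and body here are hypothetical, not part of this commit):

import unittest

import torch

from diffusers.utils.testing_utils import is_torch_compile, require_torch_2, require_torch_gpu


class HypotheticalCompileTests(unittest.TestCase):
    @is_torch_compile   # skipped unless RUN_COMPILE is set to a truthy value
    @require_torch_2    # also requires PyTorch 2.x for torch.compile
    @require_torch_gpu  # and a CUDA device
    def test_linear_forward_with_compile(self):
        model = torch.nn.Linear(4, 4).to("cuda")
        compiled = torch.compile(model)
        out = compiled(torch.randn(1, 4, device="cuda"))
        self.assertEqual(out.shape, (1, 4))

Because is_torch_compile wraps unittest.skipUnless, it composes with the other requirement decorators: the test runs only when every condition is met.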
@@ -43,7 +43,7 @@ from diffusers.utils import SAFE_WEIGHTS_INDEX_NAME, is_torch_npu_available, is_
 from diffusers.utils.testing_utils import (
     CaptureLogger,
     get_python_version,
-    require_python39_or_higher,
+    is_torch_compile,
     require_torch_2,
     require_torch_accelerator_with_training,
     require_torch_gpu,
@@ -512,7 +512,7 @@ class ModelTesterMixin:
         max_diff = (image - new_image).abs().max().item()
         self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     @unittest.skipIf(
         get_python_version == (3, 12),
@@ -36,9 +36,9 @@ from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     get_python_version,
+    is_torch_compile,
     load_image,
     load_numpy,
-    require_python39_or_higher,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -1022,7 +1022,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
         expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494])
 
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     @unittest.skipIf(
         get_python_version == (3, 12),
@@ -35,9 +35,9 @@ from diffusers import (
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
+    is_torch_compile,
     load_image,
     load_numpy,
-    require_python39_or_higher,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -392,7 +392,7 @@ class ControlNetXSPipelineSlowTests(unittest.TestCase):
         expected_image = np.array([0.4844, 0.4937, 0.4956, 0.4663, 0.5039, 0.5044, 0.4565, 0.4883, 0.4941])
 
         assert np.allclose(original_image, expected_image, atol=1e-04)
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     def test_stable_diffusion_compile(self):
         run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None)
@@ -45,12 +45,12 @@ from diffusers import (
 from diffusers.utils.testing_utils import (
     CaptureLogger,
     enable_full_determinism,
+    is_torch_compile,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
     require_accelerate_version_greater,
-    require_python39_or_higher,
     require_torch_2,
     require_torch_gpu,
     require_torch_multi_gpu,
@@ -1282,7 +1282,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
         max_diff = np.abs(expected_image - image).max()
         assert max_diff < 8e-1
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     def test_stable_diffusion_compile(self):
         seed = 0
@@ -37,10 +37,10 @@ from diffusers import (
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     floats_tensor,
+    is_torch_compile,
     load_image,
     load_numpy,
     nightly,
-    require_python39_or_higher,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -643,7 +643,7 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
         assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}"
         assert np.abs(out.images[0]).sum() < 1e-5  # should be all zeros
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     def test_img2img_compile(self):
         seed = 0
@@ -39,10 +39,10 @@ from diffusers import (
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     floats_tensor,
+    is_torch_compile,
     load_image,
     load_numpy,
     nightly,
-    require_python39_or_higher,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -715,7 +715,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
         # make sure that less than 2.2 GB is allocated
         assert mem_bytes < 2.2 * 10**9
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     def test_inpaint_compile(self):
         seed = 0
@@ -920,7 +920,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
         # make sure that less than 2.45 GB is allocated
         assert mem_bytes < 2.45 * 10**9
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     def test_inpaint_compile(self):
         pass
@@ -69,12 +69,12 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     get_python_version,
     get_tests_dir,
+    is_torch_compile,
     load_numpy,
     nightly,
     require_compel,
     require_flax,
     require_onnxruntime,
-    require_python39_or_higher,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -1761,7 +1761,7 @@ class PipelineSlowTests(unittest.TestCase):
         assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
 
-    @require_python39_or_higher
+    @is_torch_compile
     @require_torch_2
     @unittest.skipIf(
         get_python_version == (3, 12),
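To reproduce the CI behaviour locally, RUN_COMPILE has to be exported before pytest starts, mirroring the workflow change at the top of this diff. A sketch of an equivalent invocation (pytest-xdist is assumed to be installed for the -n and --dist options, as in CI):

import os
import subprocess

# Run only the compile-marked tests, with RUN_COMPILE exported so that
# cases gated by @is_torch_compile are not skipped.
env = dict(os.environ, RUN_COMPILE="yes")
subprocess.run(
    [
        "python", "-m", "pytest",
        "-n", "1", "--max-worker-restart=0", "--dist=loadfile",
        "-s", "-v", "-k", "compile",
        "tests/",
    ],
    check=True,
    env=env,
)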