Unverified Commit 51843fd7 authored by Patrick von Platen's avatar Patrick von Platen Committed by GitHub
Browse files

Refactor full determinism (#3485)

* up

* fix more

* Apply suggestions from code review

* fix more

* fix more

* Check it

* Remove 16:8

* fix more

* fix more

* fix more

* up

* up

* Test only stable diffusion

* Test only two files

* up

* Try out spinning up processes that can be killed

* up

* Apply suggestions from code review

* up

* up
parent 49ad61c2
......@@ -21,7 +21,7 @@ import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
......@@ -30,7 +30,7 @@ from ..pipeline_params import (
from ..test_pipelines_common import PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
......
......@@ -19,10 +19,10 @@ import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import require_torch, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
......
......@@ -21,13 +21,20 @@ import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, slow, torch_device
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
......
......@@ -21,10 +21,10 @@ import torch
from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
from diffusers.utils import PIL_INTERPOLATION, floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import require_torch
from diffusers.utils.testing_utils import enable_full_determinism, require_torch
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
......
......@@ -20,10 +20,10 @@ import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import require_torch, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
......
......@@ -25,14 +25,13 @@ from transformers import CLIPImageProcessor, CLIPVisionConfig
from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
......
......@@ -19,10 +19,10 @@ import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import require_torch, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
......
......@@ -20,14 +20,21 @@ import numpy as np
import torch
from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_image,
load_numpy,
nightly,
require_torch_gpu,
skip_mps,
torch_device,
)
from ..pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
......
......@@ -19,10 +19,10 @@ import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import require_torch, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class ScoreSdeVeipelineFastTests(unittest.TestCase):
......
......@@ -25,10 +25,10 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.semantic_stable_diffusion import SemanticStableDiffusionPipeline as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
class SafeDiffusionPipelineFastTests(unittest.TestCase):
......
......@@ -22,13 +22,13 @@ import torch
from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
from diffusers.utils import require_torch_gpu, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import require_note_seq, require_onnxruntime
from diffusers.utils.testing_utils import enable_full_determinism, require_note_seq, require_onnxruntime
from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
MIDI_FILE = "./tests/fixtures/elise_format0.mid"
......
......@@ -23,14 +23,13 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
......
......@@ -15,11 +15,16 @@
import gc
import os
import signal
import subprocess
import sys
import tempfile
import time
import unittest
import numpy as np
import pytest
import torch
from huggingface_hub import hf_hub_download
from packaging import version
......@@ -39,15 +44,25 @@ from diffusers import (
)
from diffusers.models.attention_processor import AttnProcessor
from diffusers.utils import load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu
from diffusers.utils.testing_utils import CaptureLogger, enable_full_determinism, require_torch_gpu
from ...models.test_models_unet_2d_condition import create_lora_layers
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
@pytest.fixture(autouse=True)
def process_fixture():
# This will be run before each test
command = [sys.executable, os.path.abspath(__file__)]
process = subprocess.Popen(command)
enable_full_determinism()
yield process
# This will be run after each test
try:
os.kill(process.pid, signal.SIGTERM) # or signal.SIGKILL
except ProcessLookupError:
pass
class StableDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
......@@ -551,8 +566,7 @@ class StableDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTester
@slow
@require_torch_gpu
class StableDiffusionPipelineSlowTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
def setUp(self):
gc.collect()
torch.cuda.empty_cache()
......
......@@ -30,14 +30,13 @@ from diffusers import (
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class StableDiffusionImageVariationPipelineFastTests(
......
......@@ -34,7 +34,7 @@ from diffusers import (
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
......@@ -44,8 +44,7 @@ from ..pipeline_params import (
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class StableDiffusionImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
......
......@@ -33,15 +33,14 @@ from diffusers import (
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ...models.test_models_unet_2d_condition import create_lora_layers
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
......
......@@ -34,11 +34,10 @@ from diffusers import (
VQModel,
)
from diffusers.utils import floats_tensor, load_image, nightly, slow, torch_device
from diffusers.utils.testing_utils import load_numpy, preprocess_image, require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, preprocess_image, require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class StableDiffusionInpaintLegacyPipelineFastTests(unittest.TestCase):
......
......@@ -32,14 +32,13 @@ from diffusers import (
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
......
......@@ -21,10 +21,10 @@ import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
enable_full_determinism()
@slow
......
......@@ -29,14 +29,13 @@ from diffusers import (
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
torch.use_deterministic_algorithms(True)
enable_full_determinism()
@skip_mps
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment