"docs/source/git@developer.sourcefind.cn:norm/vllm.git" did not exist on "4ee52bb169d64691c3bfe7b1b2fff91300d49095"
Unverified Commit 64b3e0f5 authored by Fanli Lin, committed by GitHub

make `pipelines` tests device-agnostic (part1) (#9399)



* enable on xpu

* add 1 more

* add one more

* enable more

* add 1 more

* add more

* enable 1

* enable more cases

* enable

* enable

* update comment

* one more

* enable 1

* add more cases

* enable xpu

* add one more case

* add more cases

* add 1

* add more

* add more cases

* add case

* enable

* add more

* add more

* add more

* enable more

* add more

* update code

* update test marker

* add skip back

* update comment

* remove single files

* remove

* style

* add

* revert

* reformat

* update decorator

* update

* update

* update

* Update tests/pipelines/deepfloyd_if/test_if.py
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>

* Update src/diffusers/utils/testing_utils.py
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>

* Update tests/pipelines/animatediff/test_animatediff_controlnet.py
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>

* Update tests/pipelines/animatediff/test_animatediff.py
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>

* Update tests/pipelines/animatediff/test_animatediff_controlnet.py
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>

* update float16

* no unittest.skip

* update

* apply style check

* reapply format

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: Dhruv Nair <dhruv.nair@gmail.com>
parent 2e86a3f0
@@ -373,6 +373,14 @@ def require_note_seq(test_case):
     return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)


+def require_accelerator(test_case):
+    """
+    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware
+    accelerator is available.
+    """
+    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+
+
 def require_torchsde(test_case):
     """
     Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
...
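The new `require_accelerator` marker keys off `torch_device` rather than checking for CUDA explicitly, so one decorator covers CUDA, XPU, MPS, or any other non-CPU backend. A minimal usage sketch (the test class and body are illustrative, not part of this commit):

import unittest

import torch

from diffusers.utils.testing_utils import require_accelerator, torch_device


class ExampleDeviceTests(unittest.TestCase):
    @require_accelerator
    def test_runs_on_any_accelerator(self):
        # Skipped automatically when torch_device resolves to "cpu";
        # otherwise the same code runs on whichever accelerator was detected.
        x = torch.ones(2, 2, device=torch_device)
        self.assertEqual(x.device.type, torch_device)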
@@ -22,7 +22,7 @@ from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokeni
 from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -129,7 +129,7 @@ class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):

 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-256")
...
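The slow amused tests swap `require_torch_gpu` for `require_torch_accelerator`, whose definition is not part of this diff. A plausible sketch, assuming it mirrors `require_accelerator` but additionally requires a working torch install:

import unittest

from diffusers.utils.import_utils import is_torch_available
from diffusers.utils.testing_utils import torch_device


def require_torch_accelerator(test_case):
    # Sketch only: skip unless torch is importable and the resolved test
    # device is something other than plain CPU.
    return unittest.skipUnless(
        is_torch_available() and torch_device != "cpu",
        "test requires torch and a hardware accelerator",
    )(test_case)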
@@ -23,7 +23,7 @@ from diffusers import AmusedImg2ImgPipeline, AmusedScheduler, UVit2DModel, VQMod
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -131,7 +131,7 @@ class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):

 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256")
...
@@ -23,7 +23,7 @@ from diffusers import AmusedInpaintPipeline, AmusedScheduler, UVit2DModel, VQMod
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -135,7 +135,7 @@ class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):

 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedInpaintPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256")
...
@@ -19,7 +19,13 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    numpy_cosine_similarity_distance,
+    require_accelerator,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -272,7 +278,7 @@ class AnimateDiffPipelineFastTests(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -288,14 +294,14 @@ class AnimateDiffPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
...
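Every hard-coded `pipe.to("cuda")` above becomes `pipe.to(torch_device)`, so the test exercises whichever backend the suite detected at import time. The resolution of `torch_device` is outside this diff; roughly, assuming detection follows the usual availability checks (the real helper in `diffusers.utils.testing_utils` also honors an environment override):

import torch


def _resolve_test_device() -> str:
    # Illustrative sketch of accelerator detection, not the exact upstream logic.
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"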
@@ -21,7 +21,7 @@ from diffusers import (
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -281,7 +281,7 @@ class AnimateDiffControlNetPipelineFastTests(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -297,14 +297,14 @@ class AnimateDiffControlNetPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
...
@@ -14,7 +14,7 @@ from diffusers import (
     UNetMotionModel,
 )
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -212,7 +212,7 @@ class AnimateDiffPipelineSDXLFastTests(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -228,14 +228,14 @@ class AnimateDiffPipelineSDXLFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
...
@@ -20,7 +20,7 @@ from diffusers import (
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -345,7 +345,7 @@ class AnimateDiffSparseControlNetPipelineFastTests(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -361,13 +361,13 @@ class AnimateDiffSparseControlNetPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)

     def test_to_dtype(self):
...
@@ -19,7 +19,7 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -258,7 +258,7 @@ class AnimateDiffVideoToVideoPipelineFastTests(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
    def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -274,14 +274,14 @@ class AnimateDiffVideoToVideoPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
...
@@ -20,7 +20,7 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -274,7 +274,7 @@ class AnimateDiffVideoToVideoControlNetPipelineFastTests(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -290,13 +290,13 @@ class AnimateDiffVideoToVideoControlNetPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)

     def test_to_dtype(self):
...
@@ -1019,7 +1019,7 @@ class StableDiffusionSSD1BControlNetPipelineFastTests(StableDiffusionXLControlNe
         )
         controlnet = ControlNetModel.from_unet(unet, conditioning_channels=4)

-        assert type(controlnet.mid_block) == UNetMidBlock2D
+        assert type(controlnet.mid_block) is UNetMidBlock2D
         assert controlnet.conditioning_channels == 4

     def get_dummy_components(self, time_cond_proj_dim=None):
...
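The `==` to `is` change in the type assertion follows standard lint guidance (flake8/ruff E721): identity is the precise comparison for exact-class checks and cannot be spoofed by a custom `__eq__`. A small self-contained illustration:

class TrickyMeta(type):
    # Pathological metaclass whose equality comparison always says yes.
    def __eq__(cls, other):
        return True

    def __hash__(cls):
        return id(cls)


class Weird(metaclass=TrickyMeta):
    pass


assert type(Weird()) == int      # passes only because __eq__ lies
assert type(Weird()) is not int  # identity sees the real class
assert type(Weird()) is Weird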
@@ -38,6 +38,7 @@ from diffusers.utils.testing_utils import (
     is_torch_compile,
     load_image,
     load_numpy,
+    require_accelerator,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -306,7 +307,7 @@ class ControlNetXSPipelineFastTests(
         assert out_vae_np.shape == out_np.shape

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -322,14 +323,14 @@ class ControlNetXSPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

 @slow
...
@@ -23,7 +23,14 @@ from diffusers import (
 )
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -58,7 +65,8 @@ class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.T
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
...
@@ -22,7 +22,15 @@ import torch
 from diffusers import IFImg2ImgPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
@@ -70,12 +78,14 @@ class IFImg2ImgPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, uni
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_float16_inference(self):
         super().test_float16_inference(expected_max_diff=1e-1)
...
@@ -22,7 +22,15 @@ import torch
 from diffusers import IFImg2ImgSuperResolutionPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
@@ -72,7 +80,8 @@ class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineT
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
...
@@ -22,7 +22,15 @@ import torch
 from diffusers import IFInpaintingPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
@@ -72,7 +80,8 @@ class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin,
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
...
@@ -22,7 +22,15 @@ import torch
 from diffusers import IFInpaintingSuperResolutionPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
@@ -74,7 +82,8 @@ class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipeli
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
...
@@ -22,7 +22,15 @@ import torch
 from diffusers import IFSuperResolutionPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -67,7 +75,8 @@ class IFSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMi
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
...
@@ -26,6 +26,7 @@ from diffusers.utils.testing_utils import (
     floats_tensor,
     load_image,
     nightly,
+    require_accelerator,
     require_torch,
     torch_device,
 )
@@ -93,7 +94,7 @@ class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

-    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
+    @require_accelerator
     def test_inference_superresolution_fp16(self):
         unet = self.dummy_uncond_unet
         scheduler = DDIMScheduler()
...
@@ -19,7 +19,7 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -218,7 +218,7 @@ class AnimateDiffPAGPipelineFastTests(
         expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538])
         return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -234,14 +234,14 @@ class AnimateDiffPAGPipelineFastTests(
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
...