Unverified Commit 0ec64fe9 authored by Aryan, committed by GitHub

[tests] fix broken xformers tests (#9206)

* fix xformers tests

* remove unnecessary modifications to cogvideox tests

* update
@@ -20,6 +20,7 @@ from diffusers import (
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -329,6 +330,13 @@ class AnimateDiffControlNetPipelineFastTests(
         inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
         pipe(**inputs)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
     def test_free_init(self):
         components = self.get_dummy_components()
         pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components)
...
@@ -19,6 +19,7 @@ from diffusers import (
     UNetMotionModel,
 )
 from diffusers.utils import logging
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -393,6 +394,13 @@ class AnimateDiffSparseControlNetPipelineFastTests(
         inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
         pipe(**inputs)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
     def test_free_init(self):
         components = self.get_dummy_components()
         pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components)
...
@@ -275,6 +275,10 @@ class CogVideoXPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             "VAE tiling should not affect the inference results",
         )
 
+    @unittest.skip("xformers attention processor does not exist for CogVideoX")
+    def test_xformers_attention_forwardGenerator_pass(self):
+        pass
+
 
 @slow
 @require_torch_gpu
...
@@ -28,6 +28,7 @@ from diffusers import (
     LattePipeline,
     LatteTransformer3DModel,
 )
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     numpy_cosine_similarity_distance,
@@ -256,6 +257,13 @@ class LattePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, 1.0)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
+
 
 @slow
 @require_torch_gpu
...