Unverified commit 6fde5a6d authored by Patrick von Platen, committed by GitHub

[Tests] Fix some slow tests (#3989)

fix some slow tests
parent d1d0b8af
@@ -183,7 +183,7 @@ class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin):
         self.to("cpu", silence_dtype_warnings=True)
         torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

-        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+        for cpu_offloaded_model in [self.unet, self.text_encoder, self.text_encoder_2, self.vae]:
             cpu_offload(cpu_offloaded_model, device)

     def enable_model_cpu_offload(self, gpu_id=0):
...
@@ -191,7 +191,7 @@ class StableDiffusionXLImg2ImgPipeline(DiffusionPipeline, FromSingleFileMixin):
         self.to("cpu", silence_dtype_warnings=True)
         torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

-        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+        for cpu_offloaded_model in [self.unet, self.text_encoder, self.text_encoder_2, self.vae]:
             cpu_offload(cpu_offloaded_model, device)

     def enable_model_cpu_offload(self, gpu_id=0):
...
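Both SDXL hunks above fix the same omission: these pipelines carry two text encoders (text_encoder and text_encoder_2), and the offload loop previously skipped the second one, leaving it resident on the GPU during sequential CPU offload. A minimal usage sketch of the code path the fix touches (not part of this commit; the checkpoint id is only illustrative):

import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
# With the fix, this offloads the unet, both text encoders, and the vae,
# so peak GPU memory actually drops instead of text_encoder_2 staying on the GPU.
pipe.enable_sequential_cpu_offload()
image = pipe("an astronaut riding a green horse").images[0]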
@@ -699,12 +699,16 @@ class PipelineTesterMixin:
         inputs = self.get_dummy_inputs(torch_device)
         output_without_offload = pipe(**inputs)[0]
-        output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
+        output_without_offload = (
+            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
+        )

         pipe.enable_xformers_memory_efficient_attention()
         inputs = self.get_dummy_inputs(torch_device)
         output_with_offload = pipe(**inputs)[0]
-        output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload
+        output_with_offload = (
+            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload
+        )

         if test_max_difference:
             max_diff = np.abs(output_with_offload - output_without_offload).max()
...
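The tester-mixin hunk repairs a subtle no-op: Tensor.cpu() returns a new tensor rather than moving the original, so calling it as a bare expression discarded the result and left the outputs on the GPU before the NumPy comparison. A self-contained sketch of the behavior (assuming a CUDA device is available):

import torch

t = torch.ones(2, device="cuda")
t.cpu()          # returns a CPU copy that is immediately discarded
print(t.device)  # still cuda:0
t = t.cpu()      # the reassignment is what the fix above adds
print(t.device)  # cpu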
@@ -26,7 +26,7 @@ from diffusers import (
     TextToVideoSDPipeline,
     UNet3DConditionModel,
 )
-from diffusers.utils import load_numpy, skip_mps, slow
+from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -143,6 +143,13 @@ class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_attention_slicing_forward_pass(self):
         self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
+
     # (todo): sayakpaul
     @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
     def test_inference_batch_consistent(self):
...
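The new test uses the standard unittest gating pattern: unittest.skipIf takes a boolean condition, evaluated when the test class is collected, plus a reason string. A stripped-down, runnable sketch of the same gating (HAS_CUDA_XFORMERS is a stand-in for the real torch_device/is_xformers_available() checks):

import unittest

HAS_CUDA_XFORMERS = False  # stand-in for: torch_device == "cuda" and is_xformers_available()

class AttentionTests(unittest.TestCase):
    @unittest.skipIf(
        not HAS_CUDA_XFORMERS,
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention(self):
        # body runs only when the skip condition above is False
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()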