Unverified Commit 023b0e0d authored by Fanli Lin, committed by GitHub

[tests] fix `AssertionError: Torch not compiled with CUDA enabled` (#10356)

Fix bug on XPU: pass `device=torch_device` to `enable_model_cpu_offload()` so these tests no longer assume a CUDA device.
parent c0c11683
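The change follows one pattern throughout: `enable_model_cpu_offload()` defaults to CUDA, which raises `AssertionError: Torch not compiled with CUDA enabled` on runners whose PyTorch build has no CUDA support (e.g. XPU-only machines), so every call now passes the detected test device explicitly. A minimal sketch of the pattern, assuming a standard Stable Diffusion checkpoint (the repo id below is illustrative, not taken from these tests):

```python
import torch
from diffusers import StableDiffusionPipeline
# torch_device is the accelerator detected by the diffusers test utilities
# ("cuda", "xpu", ...); the tests in this diff import it the same way.
from diffusers.utils.testing_utils import torch_device

# Illustrative checkpoint; the actual tests use self.repo_id / self.ckpt_path.
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Before: pipe.enable_model_cpu_offload()  -> assumes CUDA, fails on XPU-only builds.
# After:  pass the detected device so offload targets whatever accelerator is present.
pipe.enable_model_cpu_offload(device=torch_device)

image = pipe("a cat sitting on a park bench", num_inference_steps=3).images[0]
```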
@@ -378,14 +378,14 @@ class SDXLSingleFileTesterMixin:
     def test_single_file_format_inference_is_same_as_pretrained(self, expected_max_diff=1e-4):
         sf_pipe = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16, safety_checker=None)
         sf_pipe.unet.set_default_attn_processor()
-        sf_pipe.enable_model_cpu_offload()
+        sf_pipe.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs(torch_device)
         image_single_file = sf_pipe(**inputs).images[0]
         pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16, safety_checker=None)
         pipe.unet.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs(torch_device)
         image = pipe(**inputs).images[0]
@@ -76,14 +76,14 @@ class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SD
         controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
         pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
         pipe.unet.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         pipe_sf = self.pipeline_class.from_single_file(
             self.ckpt_path,
             controlnet=controlnet,
         )
         pipe_sf.unet.set_default_attn_processor()
-        pipe_sf.enable_model_cpu_offload()
+        pipe_sf.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs(torch_device)
         output = pipe(**inputs).images[0]
@@ -73,11 +73,11 @@ class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestC
         controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
         pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet, safety_checker=None)
         pipe.unet.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         pipe_sf = self.pipeline_class.from_single_file(self.ckpt_path, controlnet=controlnet, safety_checker=None)
         pipe_sf.unet.set_default_attn_processor()
-        pipe_sf.enable_model_cpu_offload()
+        pipe_sf.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs()
         output = pipe(**inputs).images[0]
@@ -67,14 +67,14 @@ class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SD
         controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
         pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
         pipe.unet.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         pipe_sf = self.pipeline_class.from_single_file(
             self.ckpt_path,
             controlnet=controlnet,
         )
         pipe_sf.unet.set_default_attn_processor()
-        pipe_sf.enable_model_cpu_offload()
+        pipe_sf.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs()
         output = pipe(**inputs).images[0]
@@ -49,14 +49,14 @@ class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSin
         prompt = "a cat sitting on a park bench"
         pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         generator = torch.Generator("cpu").manual_seed(0)
         output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3)
         image_from_pretrained = output.images[0]
         pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path)
-        pipe_from_single_file.enable_model_cpu_offload()
+        pipe_from_single_file.enable_model_cpu_offload(device=torch_device)
         generator = torch.Generator("cpu").manual_seed(0)
         output_from_single_file = pipe_from_single_file(
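Worth noting in the hunk above (and unchanged by it): the generator is created on CPU, so the seeded noise stays identical regardless of which accelerator the offloaded modules end up on. A minimal sketch of that pattern; the latent shape below is illustrative:

```python
import torch

# Seeding on CPU keeps the random latents reproducible across CUDA/XPU/CPU runs;
# a generator created on a specific accelerator would tie the test to that backend.
generator = torch.Generator("cpu").manual_seed(0)
noise = torch.randn(1, 4, 64, 64, generator=generator)  # illustrative SD latent shape
```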
@@ -76,7 +76,7 @@ class StableDiffusionXLAdapterPipelineSingleFileSlowTests(unittest.TestCase, SDX
             torch_dtype=torch.float16,
             safety_checker=None,
         )
-        pipe_single_file.enable_model_cpu_offload()
+        pipe_single_file.enable_model_cpu_offload(device=torch_device)
         pipe_single_file.set_progress_bar_config(disable=None)
         inputs = self.get_inputs()
@@ -88,7 +88,7 @@ class StableDiffusionXLAdapterPipelineSingleFileSlowTests(unittest.TestCase, SDX
             torch_dtype=torch.float16,
             safety_checker=None,
         )
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs()
         images = pipe(**inputs).images[0]
@@ -69,7 +69,7 @@ class StableDiffusionXLControlNetPipelineSingleFileSlowTests(unittest.TestCase,
             self.ckpt_path, controlnet=controlnet, torch_dtype=torch.float16
         )
         pipe_single_file.unet.set_default_attn_processor()
-        pipe_single_file.enable_model_cpu_offload()
+        pipe_single_file.enable_model_cpu_offload(device=torch_device)
         pipe_single_file.set_progress_bar_config(disable=None)
         inputs = self.get_inputs(torch_device)
@@ -77,7 +77,7 @@ class StableDiffusionXLControlNetPipelineSingleFileSlowTests(unittest.TestCase,
         pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet, torch_dtype=torch.float16)
         pipe.unet.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_inputs(torch_device)
         images = pipe(**inputs).images[0]
@@ -85,7 +85,7 @@ class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCa
         pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16)
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
         pipe.unet.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         generator = torch.Generator(device="cpu").manual_seed(0)
         image = pipe(
@@ -95,7 +95,7 @@ class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCa
         pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16)
         pipe_single_file.scheduler = DDIMScheduler.from_config(pipe_single_file.scheduler.config)
         pipe_single_file.unet.set_default_attn_processor()
-        pipe_single_file.enable_model_cpu_offload()
+        pipe_single_file.enable_model_cpu_offload(device=torch_device)
         generator = torch.Generator(device="cpu").manual_seed(0)
         image_single_file = pipe_single_file(
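For context on where `torch_device` comes from: the tests import it from `diffusers.utils.testing_utils`, which resolves it from the backends available on the machine. A hedged sketch of that kind of resolution logic follows; the helper name and the `DIFFUSERS_TEST_DEVICE` override shown here are assumptions, and the actual implementation in diffusers may differ.

```python
import os
import torch

def resolve_test_device() -> str:
    """Hypothetical stand-in for how a torch_device constant could be chosen."""
    # Assumed override name; diffusers' real logic may use a different mechanism.
    override = os.environ.get("DIFFUSERS_TEST_DEVICE")
    if override:
        return override
    if torch.cuda.is_available():
        return "cuda"
    # torch.xpu exists in recent PyTorch builds with Intel GPU support.
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    return "cpu"

torch_device = resolve_test_device()
```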