Unverified Commit a341b536 authored by YiYi Xu's avatar YiYi Xu Committed by GitHub
Browse files

disable test_conversion_when_using_device_map (#7620)



* disable test

* update

---------
Co-authored-by: yiyixuxu <yixu310@gmail.com>
parent 8e46d97c
import tempfile
import unittest import unittest
import numpy as np
import torch import torch
from diffusers import DiffusionPipeline
from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
...@@ -80,40 +77,42 @@ class AttnAddedKVProcessorTests(unittest.TestCase): ...@@ -80,40 +77,42 @@ class AttnAddedKVProcessorTests(unittest.TestCase):
class DeprecatedAttentionBlockTests(unittest.TestCase):
    """Regression tests for the deprecated attention-block weight conversion."""

    def test_conversion_when_using_device_map(self):
        """Disabled: conversion under ``device_map`` is not currently exercised.

        The original assertions are kept below (commented out) so the test can
        be restored verbatim once the blocker lands.
        """
        # TODO (sayakpaul): enable this test again and test `device_map='balanced'`
        # once it is available in accelerate:
        # https://github.com/huggingface/accelerate/pull/2641
        #
        # Use skipTest rather than `pass` so test runners report this as
        # SKIPPED instead of a misleading PASS.
        self.skipTest(
            "Disabled until `device_map='balanced'` is supported in accelerate "
            "(https://github.com/huggingface/accelerate/pull/2641)."
        )

        # pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)

        # pre_conversion = pipe(
        #     "foo",
        #     num_inference_steps=2,
        #     generator=torch.Generator("cpu").manual_seed(0),
        #     output_type="np",
        # ).images

        # # the initial conversion succeeds
        # pipe = DiffusionPipeline.from_pretrained(
        #     "hf-internal-testing/tiny-stable-diffusion-pipe", device_map="sequential", safety_checker=None
        # )

        # conversion = pipe(
        #     "foo",
        #     num_inference_steps=2,
        #     generator=torch.Generator("cpu").manual_seed(0),
        #     output_type="np",
        # ).images

        # with tempfile.TemporaryDirectory() as tmpdir:
        #     # save the converted model
        #     pipe.save_pretrained(tmpdir)

        #     # can also load the converted weights
        #     pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="sequential", safety_checker=None)

        # after_conversion = pipe(
        #     "foo",
        #     num_inference_steps=2,
        #     generator=torch.Generator("cpu").manual_seed(0),
        #     output_type="np",
        # ).images

        # self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-5))
        # self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-5))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment