Unverified Commit e5674015 authored by YiYi Xu, committed by GitHub

adding back test_conversion_when_using_device_map (#7704)

* style

* Fix device map nits (#7705)

---------
Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
parent b5c8b555
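
For context, the feature this commit re-enables is loading a pipeline with a device map. A minimal sketch, distilled from the test re-added below (the tiny checkpoint, prompt, and device_map="balanced" argument come straight from that test; accelerate>=0.29.3, the floor this commit sets, is assumed to be installed):

import torch

from diffusers import DiffusionPipeline

# "balanced" asks accelerate to spread the pipeline's models across the
# available devices; this is the strategy the re-enabled test exercises.
pipe = DiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-torch",
    device_map="balanced",
    safety_checker=None,
)
images = pipe(
    "foo",
    num_inference_steps=2,
    generator=torch.Generator("cpu").manual_seed(0),
    output_type="np",
).images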
@@ -95,7 +95,7 @@ from setuptools import Command, find_packages, setup
 # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
 _deps = [
     "Pillow",  # keep the PIL.Image.Resampling deprecation away
-    "accelerate>=0.11.0",
+    "accelerate>=0.29.3",
     "compel==0.1.8",
     "datasets",
     "filelock",
@@ -3,7 +3,7 @@
 # 2. run `make deps_table_update`
 deps = {
     "Pillow": "Pillow",
-    "accelerate": "accelerate>=0.11.0",
+    "accelerate": "accelerate>=0.29.3",
     "compel": "compel==0.1.8",
     "datasets": "datasets",
     "filelock": "filelock",
@@ -700,6 +700,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
                 offload_state_dict=offload_state_dict,
                 dtype=torch_dtype,
                 force_hooks=True,
+                strict=True,
             )
         except AttributeError as e:
             # When using accelerate loading, we do not have the ability to load the state
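
A note on the strict=True added above: it makes the accelerate-based load fail loudly on state dict key mismatches instead of skipping them silently, mirroring the usual torch.nn.Module.load_state_dict semantics. A tiny illustration of those semantics (hypothetical toy modules, not diffusers code):

import torch

# Hypothetical toy modules illustrating strict vs. non-strict loading; this
# is plain torch semantics, not diffusers code.
src = torch.nn.Linear(4, 4)
dst = torch.nn.Sequential(torch.nn.Linear(4, 4))  # keys differ: "0.weight" vs "weight"

try:
    # strict=True: missing/unexpected keys raise a RuntimeError immediately
    dst.load_state_dict(src.state_dict(), strict=True)
except RuntimeError as err:
    print("strict load failed:", err)

# strict=False: mismatches are reported instead of raising
report = dst.load_state_dict(src.state_dict(), strict=False)
print("missing:", report.missing_keys, "unexpected:", report.unexpected_keys)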
@@ -571,6 +571,8 @@ def _get_final_device_map(device_map, pipeline_class, passed_class_obj, init_dic
     # Obtain a dictionary mapping the model-level components to the available
     # devices based on the maximum memory and the model sizes.
-    device_id_component_mapping = _assign_components_to_devices(
-        module_sizes, max_memory, device_mapping_strategy=device_map
-    )
+    final_device_map = None
+    if len(max_memory) > 0:
+        device_id_component_mapping = _assign_components_to_devices(
+            module_sizes, max_memory, device_mapping_strategy=device_map
+        )
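
The hunk above guards against an empty max_memory mapping: with no devices to budget for, final_device_map stays None rather than the helper being called with nothing to assign. For intuition, here is a hypothetical, much-simplified sketch of greedy size-based component placement (the real _assign_components_to_devices is not shown in this diff and differs in detail):

# Hypothetical, simplified stand-in for size-based component placement; the
# real diffusers helper is _assign_components_to_devices, whose internals
# this diff does not show.
def assign_components_to_devices(module_sizes, max_memory):
    remaining = dict(max_memory)                     # budget left per device
    mapping = {device: [] for device in max_memory}
    # place the largest components first, each on the device with most room
    for name, size in sorted(module_sizes.items(), key=lambda kv: kv[1], reverse=True):
        device = max(remaining, key=remaining.get)
        mapping[device].append(name)
        remaining[device] -= size
    return mapping

# toy sizes/budgets (arbitrary units), three components across two GPUs
print(assign_components_to_devices(
    {"unet": 6, "text_encoder": 2, "vae": 1},
    {0: 8, 1: 4},
))
# -> {0: ['unet', 'vae'], 1: ['text_encoder']}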
+import tempfile
 import unittest

+import numpy as np
 import torch

+from diffusers import DiffusionPipeline
 from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
@@ -77,42 +80,41 @@ class AttnAddedKVProcessorTests(unittest.TestCase):
 class DeprecatedAttentionBlockTests(unittest.TestCase):
     def test_conversion_when_using_device_map(self):
-        # To-DO for Sayak: enable this test again and to test `device_map='balanced'` once we have this in accelerate https://github.com/huggingface/accelerate/pull/2641
-        pass
-        # pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)
-        # pre_conversion = pipe(
-        #     "foo",
-        #     num_inference_steps=2,
-        #     generator=torch.Generator("cpu").manual_seed(0),
-        #     output_type="np",
-        # ).images
+        pipe = DiffusionPipeline.from_pretrained(
+            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
+        )
+        pre_conversion = pipe(
+            "foo",
+            num_inference_steps=2,
+            generator=torch.Generator("cpu").manual_seed(0),
+            output_type="np",
+        ).images

-        # # the initial conversion succeeds
-        # pipe = DiffusionPipeline.from_pretrained(
-        #     "hf-internal-testing/tiny-stable-diffusion-pipe", device_map="sequential", safety_checker=None
-        # )
-        # conversion = pipe(
-        #     "foo",
-        #     num_inference_steps=2,
-        #     generator=torch.Generator("cpu").manual_seed(0),
-        #     output_type="np",
-        # ).images
+        # the initial conversion succeeds
+        pipe = DiffusionPipeline.from_pretrained(
+            "hf-internal-testing/tiny-stable-diffusion-torch", device_map="balanced", safety_checker=None
+        )
+        conversion = pipe(
+            "foo",
+            num_inference_steps=2,
+            generator=torch.Generator("cpu").manual_seed(0),
+            output_type="np",
+        ).images

-        # with tempfile.TemporaryDirectory() as tmpdir:
-        #     # save the converted model
-        #     pipe.save_pretrained(tmpdir)
-        #     # can also load the converted weights
-        #     pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="sequential", safety_checker=None)
-        #     after_conversion = pipe(
-        #         "foo",
-        #         num_inference_steps=2,
-        #         generator=torch.Generator("cpu").manual_seed(0),
-        #         output_type="np",
-        #     ).images
+        with tempfile.TemporaryDirectory() as tmpdir:
+            # save the converted model
+            pipe.save_pretrained(tmpdir)
+            # can also load the converted weights
+            pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="balanced", safety_checker=None)
+            after_conversion = pipe(
+                "foo",
+                num_inference_steps=2,
+                generator=torch.Generator("cpu").manual_seed(0),
+                output_type="np",
+            ).images

-        # self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-5))
-        # self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-5))
+        self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-3))
+        self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-3))
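
To run just the re-enabled test, one option is loading it by dotted name (a sketch; the module path "tests.models.test_attention_processor" is an assumption about where DeprecatedAttentionBlockTests lives and may need adjusting to the checkout):

import unittest

# Load a single test by dotted path and run it; the path below is an
# assumed location for DeprecatedAttentionBlockTests, not confirmed by this diff.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "tests.models.test_attention_processor.DeprecatedAttentionBlockTests"
    ".test_conversion_when_using_device_map"
)
unittest.TextTestRunner(verbosity=2).run(suite)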