Unverified Commit 8b451eb6 authored by Patrick von Platen, committed by GitHub

Fix config prints and save, load of pipelines (#2849)

* [Config] Fix config prints and save, load

* Only use potential nn.Modules for dtype and device

* Correct vae image processor

* make sure in_channels is not accessed directly

* make sure in channels is only accessed via config

* Make sure schedulers only access config attributes

* Make sure to access config in SAG

* Fix vae processor and make style

* add tests

* uP

* make style

* Fix more naming issues

* Final fix with vae config

* change more
parent 83691967
@@ -190,8 +190,8 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
                 the number of diffusion steps used when generating samples with a pre-trained model.
         """
         steps = num_inference_steps
-        order = self.solver_order
-        if self.lower_order_final:
+        order = self.config.solver_order
+        if self.config.lower_order_final:
             if order == 3:
                 if steps % 3 == 0:
                     orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1]
@@ -227,7 +227,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         """
         self.num_inference_steps = num_inference_steps
         timesteps = (
-            np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)
+            np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1)
             .round()[::-1][:-1]
             .copy()
             .astype(np.int64)
...
@@ -195,7 +195,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
                 the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
         """
         timesteps = (
-            np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)
+            np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1)
             .round()[::-1][:-1]
             .copy()
             .astype(np.int64)
...
@@ -73,7 +73,7 @@ class CustomLocalPipeline(DiffusionPipeline):
         # Sample gaussian noise to begin loop
         image = torch.randn(
-            (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
+            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
             generator=generator,
         )
         image = image.to(self.device)
...
@@ -73,7 +73,7 @@ class CustomLocalPipeline(DiffusionPipeline):
         # Sample gaussian noise to begin loop
         image = torch.randn(
-            (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
+            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
             generator=generator,
         )
         image = image.to(self.device)
...
@@ -116,7 +116,7 @@ class UNet1DModelTests(ModelTesterMixin, unittest.TestCase):
         if torch.cuda.is_available():
             torch.cuda.manual_seed_all(0)
-        num_features = model.in_channels
+        num_features = model.config.in_channels
         seq_len = 16
         noise = torch.randn((1, seq_len, num_features)).permute(
             0, 2, 1
@@ -264,7 +264,7 @@ class UNetRLModelTests(ModelTesterMixin, unittest.TestCase):
         if torch.cuda.is_available():
             torch.cuda.manual_seed_all(0)
-        num_features = value_function.in_channels
+        num_features = value_function.config.in_channels
         seq_len = 14
         noise = torch.randn((1, seq_len, num_features)).permute(
             0, 2, 1
...
@@ -675,6 +675,25 @@ class CustomPipelineTests(unittest.TestCase):
         image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
         assert image.shape == (512, 512, 3)
+
+    def test_save_pipeline_change_config(self):
+        pipe = DiffusionPipeline.from_pretrained(
+            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
+        )
+
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            pipe.save_pretrained(tmpdirname)
+            pipe = DiffusionPipeline.from_pretrained(tmpdirname)
+
+            assert pipe.scheduler.__class__.__name__ == "PNDMScheduler"
+
+        # let's make sure that changing the scheduler is correctly reflected
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+            pipe.save_pretrained(tmpdirname)
+            pipe = DiffusionPipeline.from_pretrained(tmpdirname)
+
+            assert pipe.scheduler.__class__.__name__ == "DPMSolverMultistepScheduler"
+
 class PipelineFastTests(unittest.TestCase):
     def tearDown(self):
...