"src/graph/transform/cuda/knn.hip" did not exist on "8f0df39ee106efb665af90c61dddfdfbf4899991"
Unverified Commit ca783a0f authored by Patrick von Platen, committed by GitHub

[Bug fix] Make sure correct timesteps are chosen for img2img (#3128)

Make sure correct timesteps are chosen for img2img
parent beb848e2
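Why the slice needs self.scheduler.order: a second-order scheduler such as HeunDiscreteScheduler expands the timestep schedule to roughly `order` entries per inference step, so slicing self.scheduler.timesteps with the raw t_start starts img2img at the wrong noise level. Below is a minimal standalone sketch with illustrative numbers (not the diffusers implementation) showing the difference between the old and the fixed slice:

# Toy reproduction of get_timesteps with a 2nd-order scheduler (illustrative only).
num_inference_steps = 10
strength = 0.75
order = 2  # e.g. HeunDiscreteScheduler evaluates the model twice per step

# Toy schedule: 10 base timesteps, each repeated `order` times to mimic a
# second-order scheduler's expanded timestep array (real Heun differs slightly).
base = list(range(1000, 0, -100))                    # 1000, 900, ..., 100
timesteps = [t for t in base for _ in range(order)]  # 20 entries

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 7
t_start = max(num_inference_steps - init_timestep, 0)                          # 3

old_slice = timesteps[t_start:]          # 17 entries: starts mid-pair, wrong noise level
new_slice = timesteps[t_start * order:]  # 14 entries: exactly init_timestep * order

print(len(old_slice), len(new_slice))    # 17 14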
@@ -503,7 +503,7 @@ class AltDiffusionImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin
         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
         t_start = max(num_inference_steps - init_timestep, 0)
-        timesteps = self.scheduler.timesteps[t_start:]
+        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
         return timesteps, num_inference_steps - t_start
@@ -528,7 +528,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
         t_start = max(num_inference_steps - init_timestep, 0)
-        timesteps = self.scheduler.timesteps[t_start:]
+        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
         return timesteps, num_inference_steps - t_start
@@ -390,7 +390,7 @@ class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoader
         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
         t_start = max(num_inference_steps - init_timestep, 0)
-        timesteps = self.scheduler.timesteps[t_start:]
+        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
         return timesteps, num_inference_steps - t_start
@@ -511,7 +511,7 @@ class StableDiffusionImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMi
         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
         t_start = max(num_inference_steps - init_timestep, 0)
-        timesteps = self.scheduler.timesteps[t_start:]
+        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
         return timesteps, num_inference_steps - t_start
@@ -507,7 +507,7 @@ class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline, TextualInversionLo
         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
         t_start = max(num_inference_steps - init_timestep, 0)
-        timesteps = self.scheduler.timesteps[t_start:]
+        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
         return timesteps, num_inference_steps - t_start
@@ -25,6 +25,7 @@ from diffusers import (
     AutoencoderKL,
     DDIMScheduler,
     DPMSolverMultistepScheduler,
+    HeunDiscreteScheduler,
     LMSDiscreteScheduler,
     PNDMScheduler,
     StableDiffusionImg2ImgPipeline,
@@ -416,6 +417,33 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
         for module in pipe.text_encoder, pipe.unet, pipe.vae:
             assert module.device == torch.device("cpu")
+
+    def test_img2img_2nd_order(self):
+        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+        sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
+        sd_pipe.to(torch_device)
+        sd_pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_inputs(torch_device)
+        inputs["num_inference_steps"] = 10
+        inputs["strength"] = 0.75
+        image = sd_pipe(**inputs).images[0]
+
+        expected_image = load_numpy(
+            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy"
+        )
+        max_diff = np.abs(expected_image - image).max()
+        assert max_diff < 5e-2
+
+        inputs = self.get_inputs(torch_device)
+        inputs["num_inference_steps"] = 11
+        inputs["strength"] = 0.75
+        image_other = sd_pipe(**inputs).images[0]
+
+        mean_diff = np.abs(image - image_other).mean()
+
+        # images should be very similar
+        assert mean_diff < 5e-2

     def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
         init_image = load_image(
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
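The new test above exercises exactly this: with the second-order HeunDiscreteScheduler, running the same img2img inputs with 10 and 11 inference steps should yield nearly identical images. Before the fix, the off-by-order slice shifted the starting timestep, so adjacent step counts diverged noticeably.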