Unverified commit 5d848ec0, authored by M. Tolga Cangöz and committed by GitHub

[`Tests`] Update a deprecated parameter in test files and fix several typos (#7277)

* Add properties and `IPAdapterTesterMixin` tests for `StableDiffusionPanoramaPipeline`

* Fix variable name typo and update comments

* Update deprecated `output_type="numpy"` to "np" in test files

* Discard changes to src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py

* Update test_stable_diffusion_panorama.py

* Update numbers in README.md

* Update get_guidance_scale_embedding method to use timesteps instead of w

* Update number of checkpoints in README.md

* Add type hints and fix var name

* Fix PyTorch's convention for inplace functions

* Fix a typo

* Revert "Fix PyTorch's convention for inplace functions"

This reverts commit 74350cf65b2c9aa77f08bec7937d7a8b13edb509.

* Fix typos

* Indent

* Refactor get_guidance_scale_embedding method in LEditsPPPipelineStableDiffusionXL class
parent 4974b845
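Two of the commits above touch `get_guidance_scale_embedding` (updating it to take `timesteps` and refactoring it in `LEditsPPPipelineStableDiffusionXL`). For reference, below is a minimal sketch of the sinusoidal guidance-scale ("w") embedding such a method computes; the signature, defaults, and body are illustrative assumptions, not the exact code changed by this PR.

```python
# Minimal sketch (assumed, not the exact diffusers code touched in this PR):
# embeds a 1-D batch of guidance scales into sinusoidal features.
import math

import torch


def get_guidance_scale_embedding(
    w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
) -> torch.Tensor:
    assert len(w.shape) == 1, "expects a 1-D batch of guidance scales"
    w = w * 1000.0  # scale the guidance values before embedding

    half_dim = embedding_dim // 2
    emb = math.log(10000.0) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
    emb = w.to(dtype)[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:
        # zero-pad so odd embedding dimensions still yield `embedding_dim` features
        emb = torch.nn.functional.pad(emb, (0, 1))
    assert emb.shape == (w.shape[0], embedding_dim)
    return emb
```

Called with a 1-D batch of guidance scales, e.g. `get_guidance_scale_embedding(torch.tensor([7.5]), embedding_dim=256)`, it returns a `(batch, embedding_dim)` tensor suitable for the UNet's time-conditioning projection.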
@@ -117,7 +117,7 @@ class StableDiffusionImageVariationPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
@@ -293,7 +293,7 @@ class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -107,7 +107,7 @@ class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
@@ -222,7 +222,7 @@ class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
@@ -268,7 +268,7 @@ class StableDiffusionPipelineNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -105,7 +105,7 @@ class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, Pipeli
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
@@ -263,7 +263,7 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -290,7 +290,7 @@ class StableDiffusionXLAdapterPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -143,7 +143,7 @@ class StableDiffusionXLInstructPix2PixPipelineFastTests(
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -168,7 +168,7 @@ class StableUnCLIPPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -117,10 +117,10 @@ def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
new_ddpm.to(torch_device)
generator = torch.Generator(device=torch_device).manual_seed(0)
- image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
+ image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0)
- new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
+ new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
except Exception:
@@ -363,12 +363,12 @@ class DownloadTests(unittest.TestCase):
)
pipe = pipe.to(torch_device)
generator = torch.manual_seed(0)
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
pipe_2 = pipe_2.to(torch_device)
generator = torch.manual_seed(0)
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3
@@ -379,7 +379,7 @@ class DownloadTests(unittest.TestCase):
)
pipe = pipe.to(torch_device)
generator = torch.manual_seed(0)
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)
@@ -388,7 +388,7 @@ class DownloadTests(unittest.TestCase):
generator = torch.manual_seed(0)
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3
@@ -398,7 +398,7 @@ class DownloadTests(unittest.TestCase):
pipe = pipe.to(torch_device)
generator = torch.manual_seed(0)
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)
@@ -407,7 +407,7 @@ class DownloadTests(unittest.TestCase):
generator = torch.manual_seed(0)
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3
@@ -590,7 +590,7 @@ class DownloadTests(unittest.TestCase):
)
pipe = pipe.to(torch_device)
generator = torch.manual_seed(0)
- out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)
@@ -601,7 +601,7 @@ class DownloadTests(unittest.TestCase):
generator = torch.manual_seed(0)
- out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images
+ out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3
@@ -626,7 +626,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>"
prompt = "hey <*>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# single token load local with weight name
@@ -642,7 +642,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>"
prompt = "hey <**>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# multi token load
@@ -665,7 +665,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2"
prompt = "hey <***>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# multi token load a1111
@@ -693,7 +693,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2"
prompt = "hey <****>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# multi embedding load
@@ -718,7 +718,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>"
prompt = "hey <*****> <******>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# single token state dict load
@@ -731,7 +731,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>"
prompt = "hey <x>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# multi embedding state dict load
@@ -751,7 +751,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>"
prompt = "hey <xxxxx> <xxxxxx>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# auto1111 multi-token state dict load
@@ -777,7 +777,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2"
prompt = "hey <xxxx>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
# multiple references to multi embedding
@@ -789,7 +789,7 @@ class DownloadTests(unittest.TestCase):
)
prompt = "hey <cat> <cat>"
- out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
+ out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3)
def test_text_inversion_multi_tokens(self):
@@ -1739,10 +1739,10 @@ class PipelineSlowTests(unittest.TestCase):
new_ddpm.to(torch_device)
generator = torch.Generator(device=torch_device).manual_seed(0)
- image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
+ image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0)
- new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
+ new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
@@ -1765,10 +1765,10 @@ class PipelineSlowTests(unittest.TestCase):
ddpm_from_hub.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0)
- image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
+ image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0)
- new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images
+ new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
@@ -1788,10 +1788,10 @@ class PipelineSlowTests(unittest.TestCase):
ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0)
- image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="numpy").images
+ image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0)
- new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images
+ new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
@@ -1803,7 +1803,7 @@ class PipelineSlowTests(unittest.TestCase):
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
- images = pipe(output_type="numpy").images
+ images = pipe(output_type="np").images
assert images.shape == (1, 32, 32, 3)
assert isinstance(images, np.ndarray)
@@ -1878,7 +1878,7 @@ class PipelineSlowTests(unittest.TestCase):
generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])]
images = pipe(
- prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="numpy"
+ prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="np"
).images
for i, image in enumerate(images):
@@ -1916,7 +1916,7 @@ class PipelineNightlyTests(unittest.TestCase):
ddim.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(seed)
- ddpm_images = ddpm(batch_size=2, generator=generator, output_type="numpy").images
+ ddpm_images = ddpm(batch_size=2, generator=generator, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(seed)
ddim_images = ddim(
@@ -1924,7 +1924,7 @@ class PipelineNightlyTests(unittest.TestCase):
generator=generator,
num_inference_steps=1000,
eta=1.0,
- output_type="numpy",
+ output_type="np",
use_clipped_model_output=True, # Need this to make DDIM match DDPM
).images
......
@@ -233,7 +233,7 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"prior_num_inference_steps": 2,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
......
@@ -158,7 +158,7 @@ class UniDiffuserPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
- "output_type": "numpy",
+ "output_type": "np",
}
return inputs
@@ -199,7 +199,7 @@ class UniDiffuserPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
- "output_type": "numpy",
+ "output_type": "np",
"prompt_latents": latents.get("prompt_latents"),
"vae_latents": latents.get("vae_latents"),
"clip_latents": latents.get("clip_latents"),
@@ -590,7 +590,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 8.0,
- "output_type": "numpy",
+ "output_type": "np",
}
if generate_latents:
latents = self.get_fixed_latents(device, seed=seed)
@@ -706,7 +706,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 8.0,
- "output_type": "numpy",
+ "output_type": "np",
}
if generate_latents:
latents = self.get_fixed_latents(device, seed=seed)
......