Unverified Commit 5d848ec0 authored by M. Tolga Cangöz's avatar M. Tolga Cangöz Committed by GitHub
Browse files

[`Tests`] Update a deprecated parameter in test files and fix several typos (#7277)

* Add properties and `IPAdapterTesterMixin` tests for `StableDiffusionPanoramaPipeline`

* Fix variable name typo and update comments

* Update deprecated `output_type="numpy"` to "np" in test files

* Discard changes to src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py

* Update test_stable_diffusion_panorama.py

* Update numbers in README.md

* Update get_guidance_scale_embedding method to use timesteps instead of w

* Update number of checkpoints in README.md

* Add type hints and fix var name

* Fix PyTorch's convention for inplace functions

* Fix a typo

* Revert "Fix PyTorch's convention for inplace functions"

This reverts commit 74350cf65b2c9aa77f08bec7937d7a8b13edb509.

* Fix typos

* Indent

* Refactor get_guidance_scale_embedding method in LEditsPPPipelineStableDiffusionXL class
parent 4974b845
...@@ -117,7 +117,7 @@ class StableDiffusionImageVariationPipelineFastTests( ...@@ -117,7 +117,7 @@ class StableDiffusionImageVariationPipelineFastTests(
"generator": generator, "generator": generator,
"num_inference_steps": 2, "num_inference_steps": 2,
"guidance_scale": 6.0, "guidance_scale": 6.0,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
...@@ -293,7 +293,7 @@ class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase): ...@@ -293,7 +293,7 @@ class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 50, "num_inference_steps": 50,
"guidance_scale": 7.5, "guidance_scale": 7.5,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -107,7 +107,7 @@ class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase): ...@@ -107,7 +107,7 @@ class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 2, "num_inference_steps": 2,
"guidance_scale": 6.0, "guidance_scale": 6.0,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
...@@ -222,7 +222,7 @@ class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase): ...@@ -222,7 +222,7 @@ class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 3, "num_inference_steps": 3,
"guidance_scale": 7.5, "guidance_scale": 7.5,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
...@@ -268,7 +268,7 @@ class StableDiffusionPipelineNightlyTests(unittest.TestCase): ...@@ -268,7 +268,7 @@ class StableDiffusionPipelineNightlyTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 50, "num_inference_steps": 50,
"guidance_scale": 7.5, "guidance_scale": 7.5,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -105,7 +105,7 @@ class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, Pipeli ...@@ -105,7 +105,7 @@ class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, Pipeli
"width": None, "width": None,
"num_inference_steps": 1, "num_inference_steps": 1,
"guidance_scale": 6.0, "guidance_scale": 6.0,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
...@@ -263,7 +263,7 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase): ...@@ -263,7 +263,7 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 3, "num_inference_steps": 3,
"guidance_scale": 7.5, "guidance_scale": 7.5,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -290,7 +290,7 @@ class StableDiffusionXLAdapterPipelineFastTests( ...@@ -290,7 +290,7 @@ class StableDiffusionXLAdapterPipelineFastTests(
"generator": generator, "generator": generator,
"num_inference_steps": 2, "num_inference_steps": 2,
"guidance_scale": 5.0, "guidance_scale": 5.0,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -143,7 +143,7 @@ class StableDiffusionXLInstructPix2PixPipelineFastTests( ...@@ -143,7 +143,7 @@ class StableDiffusionXLInstructPix2PixPipelineFastTests(
"num_inference_steps": 2, "num_inference_steps": 2,
"guidance_scale": 6.0, "guidance_scale": 6.0,
"image_guidance_scale": 1, "image_guidance_scale": 1,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -168,7 +168,7 @@ class StableUnCLIPPipelineFastTests( ...@@ -168,7 +168,7 @@ class StableUnCLIPPipelineFastTests(
"generator": generator, "generator": generator,
"num_inference_steps": 2, "num_inference_steps": 2,
"prior_num_inference_steps": 2, "prior_num_inference_steps": 2,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -117,10 +117,10 @@ def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): ...@@ -117,10 +117,10 @@ def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
new_ddpm.to(torch_device) new_ddpm.to(torch_device)
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
except Exception: except Exception:
...@@ -363,12 +363,12 @@ class DownloadTests(unittest.TestCase): ...@@ -363,12 +363,12 @@ class DownloadTests(unittest.TestCase):
) )
pipe = pipe.to(torch_device) pipe = pipe.to(torch_device)
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
pipe_2 = pipe_2.to(torch_device) pipe_2 = pipe_2.to(torch_device)
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3 assert np.max(np.abs(out - out_2)) < 1e-3
...@@ -379,7 +379,7 @@ class DownloadTests(unittest.TestCase): ...@@ -379,7 +379,7 @@ class DownloadTests(unittest.TestCase):
) )
pipe = pipe.to(torch_device) pipe = pipe.to(torch_device)
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname) pipe.save_pretrained(tmpdirname)
...@@ -388,7 +388,7 @@ class DownloadTests(unittest.TestCase): ...@@ -388,7 +388,7 @@ class DownloadTests(unittest.TestCase):
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3 assert np.max(np.abs(out - out_2)) < 1e-3
...@@ -398,7 +398,7 @@ class DownloadTests(unittest.TestCase): ...@@ -398,7 +398,7 @@ class DownloadTests(unittest.TestCase):
pipe = pipe.to(torch_device) pipe = pipe.to(torch_device)
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname) pipe.save_pretrained(tmpdirname)
...@@ -407,7 +407,7 @@ class DownloadTests(unittest.TestCase): ...@@ -407,7 +407,7 @@ class DownloadTests(unittest.TestCase):
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3 assert np.max(np.abs(out - out_2)) < 1e-3
...@@ -590,7 +590,7 @@ class DownloadTests(unittest.TestCase): ...@@ -590,7 +590,7 @@ class DownloadTests(unittest.TestCase):
) )
pipe = pipe.to(torch_device) pipe = pipe.to(torch_device)
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname) pipe.save_pretrained(tmpdirname)
...@@ -601,7 +601,7 @@ class DownloadTests(unittest.TestCase): ...@@ -601,7 +601,7 @@ class DownloadTests(unittest.TestCase):
generator = torch.manual_seed(0) generator = torch.manual_seed(0)
out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images
assert np.max(np.abs(out - out_2)) < 1e-3 assert np.max(np.abs(out - out_2)) < 1e-3
...@@ -626,7 +626,7 @@ class DownloadTests(unittest.TestCase): ...@@ -626,7 +626,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>" assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>"
prompt = "hey <*>" prompt = "hey <*>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# single token load local with weight name # single token load local with weight name
...@@ -642,7 +642,7 @@ class DownloadTests(unittest.TestCase): ...@@ -642,7 +642,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>" assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>"
prompt = "hey <**>" prompt = "hey <**>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# multi token load # multi token load
...@@ -665,7 +665,7 @@ class DownloadTests(unittest.TestCase): ...@@ -665,7 +665,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2" assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2"
prompt = "hey <***>" prompt = "hey <***>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# multi token load a1111 # multi token load a1111
...@@ -693,7 +693,7 @@ class DownloadTests(unittest.TestCase): ...@@ -693,7 +693,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2" assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2"
prompt = "hey <****>" prompt = "hey <****>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# multi embedding load # multi embedding load
...@@ -718,7 +718,7 @@ class DownloadTests(unittest.TestCase): ...@@ -718,7 +718,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>" assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>"
prompt = "hey <*****> <******>" prompt = "hey <*****> <******>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# single token state dict load # single token state dict load
...@@ -731,7 +731,7 @@ class DownloadTests(unittest.TestCase): ...@@ -731,7 +731,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>" assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>"
prompt = "hey <x>" prompt = "hey <x>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# multi embedding state dict load # multi embedding state dict load
...@@ -751,7 +751,7 @@ class DownloadTests(unittest.TestCase): ...@@ -751,7 +751,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>" assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>"
prompt = "hey <xxxxx> <xxxxxx>" prompt = "hey <xxxxx> <xxxxxx>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# auto1111 multi-token state dict load # auto1111 multi-token state dict load
...@@ -777,7 +777,7 @@ class DownloadTests(unittest.TestCase): ...@@ -777,7 +777,7 @@ class DownloadTests(unittest.TestCase):
assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2" assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2"
prompt = "hey <xxxx>" prompt = "hey <xxxx>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
# multiple references to multi embedding # multiple references to multi embedding
...@@ -789,7 +789,7 @@ class DownloadTests(unittest.TestCase): ...@@ -789,7 +789,7 @@ class DownloadTests(unittest.TestCase):
) )
prompt = "hey <cat> <cat>" prompt = "hey <cat> <cat>"
out = pipe(prompt, num_inference_steps=1, output_type="numpy").images out = pipe(prompt, num_inference_steps=1, output_type="np").images
assert out.shape == (1, 128, 128, 3) assert out.shape == (1, 128, 128, 3)
def test_text_inversion_multi_tokens(self): def test_text_inversion_multi_tokens(self):
...@@ -1739,10 +1739,10 @@ class PipelineSlowTests(unittest.TestCase): ...@@ -1739,10 +1739,10 @@ class PipelineSlowTests(unittest.TestCase):
new_ddpm.to(torch_device) new_ddpm.to(torch_device)
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
...@@ -1765,10 +1765,10 @@ class PipelineSlowTests(unittest.TestCase): ...@@ -1765,10 +1765,10 @@ class PipelineSlowTests(unittest.TestCase):
ddpm_from_hub.set_progress_bar_config(disable=None) ddpm_from_hub.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
...@@ -1788,10 +1788,10 @@ class PipelineSlowTests(unittest.TestCase): ...@@ -1788,10 +1788,10 @@ class PipelineSlowTests(unittest.TestCase):
ddpm_from_hub_custom_model.set_progress_bar_config(disable=None) ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="numpy").images image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(0) generator = torch.Generator(device=torch_device).manual_seed(0)
new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
...@@ -1803,7 +1803,7 @@ class PipelineSlowTests(unittest.TestCase): ...@@ -1803,7 +1803,7 @@ class PipelineSlowTests(unittest.TestCase):
pipe.to(torch_device) pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None) pipe.set_progress_bar_config(disable=None)
images = pipe(output_type="numpy").images images = pipe(output_type="np").images
assert images.shape == (1, 32, 32, 3) assert images.shape == (1, 32, 32, 3)
assert isinstance(images, np.ndarray) assert isinstance(images, np.ndarray)
...@@ -1878,7 +1878,7 @@ class PipelineSlowTests(unittest.TestCase): ...@@ -1878,7 +1878,7 @@ class PipelineSlowTests(unittest.TestCase):
generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])] generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])]
images = pipe( images = pipe(
prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="numpy" prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="np"
).images ).images
for i, image in enumerate(images): for i, image in enumerate(images):
...@@ -1916,7 +1916,7 @@ class PipelineNightlyTests(unittest.TestCase): ...@@ -1916,7 +1916,7 @@ class PipelineNightlyTests(unittest.TestCase):
ddim.set_progress_bar_config(disable=None) ddim.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(seed) generator = torch.Generator(device=torch_device).manual_seed(seed)
ddpm_images = ddpm(batch_size=2, generator=generator, output_type="numpy").images ddpm_images = ddpm(batch_size=2, generator=generator, output_type="np").images
generator = torch.Generator(device=torch_device).manual_seed(seed) generator = torch.Generator(device=torch_device).manual_seed(seed)
ddim_images = ddim( ddim_images = ddim(
...@@ -1924,7 +1924,7 @@ class PipelineNightlyTests(unittest.TestCase): ...@@ -1924,7 +1924,7 @@ class PipelineNightlyTests(unittest.TestCase):
generator=generator, generator=generator,
num_inference_steps=1000, num_inference_steps=1000,
eta=1.0, eta=1.0,
output_type="numpy", output_type="np",
use_clipped_model_output=True, # Need this to make DDIM match DDPM use_clipped_model_output=True, # Need this to make DDIM match DDPM
).images ).images
......
...@@ -233,7 +233,7 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase): ...@@ -233,7 +233,7 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"prior_num_inference_steps": 2, "prior_num_inference_steps": 2,
"decoder_num_inference_steps": 2, "decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2, "super_res_num_inference_steps": 2,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
......
...@@ -158,7 +158,7 @@ class UniDiffuserPipelineFastTests( ...@@ -158,7 +158,7 @@ class UniDiffuserPipelineFastTests(
"generator": generator, "generator": generator,
"num_inference_steps": 2, "num_inference_steps": 2,
"guidance_scale": 6.0, "guidance_scale": 6.0,
"output_type": "numpy", "output_type": "np",
} }
return inputs return inputs
...@@ -199,7 +199,7 @@ class UniDiffuserPipelineFastTests( ...@@ -199,7 +199,7 @@ class UniDiffuserPipelineFastTests(
"generator": generator, "generator": generator,
"num_inference_steps": 2, "num_inference_steps": 2,
"guidance_scale": 6.0, "guidance_scale": 6.0,
"output_type": "numpy", "output_type": "np",
"prompt_latents": latents.get("prompt_latents"), "prompt_latents": latents.get("prompt_latents"),
"vae_latents": latents.get("vae_latents"), "vae_latents": latents.get("vae_latents"),
"clip_latents": latents.get("clip_latents"), "clip_latents": latents.get("clip_latents"),
...@@ -590,7 +590,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase): ...@@ -590,7 +590,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 3, "num_inference_steps": 3,
"guidance_scale": 8.0, "guidance_scale": 8.0,
"output_type": "numpy", "output_type": "np",
} }
if generate_latents: if generate_latents:
latents = self.get_fixed_latents(device, seed=seed) latents = self.get_fixed_latents(device, seed=seed)
...@@ -706,7 +706,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase): ...@@ -706,7 +706,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase):
"generator": generator, "generator": generator,
"num_inference_steps": 3, "num_inference_steps": 3,
"guidance_scale": 8.0, "guidance_scale": 8.0,
"output_type": "numpy", "output_type": "np",
} }
if generate_latents: if generate_latents:
latents = self.get_fixed_latents(device, seed=seed) latents = self.get_fixed_latents(device, seed=seed)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment