Unverified Commit 5d848ec0 authored by M. Tolga Cangöz's avatar M. Tolga Cangöz Committed by GitHub
Browse files

[`Tests`] Update a deprecated parameter in test files and fix several typos (#7277)

* Add properties and `IPAdapterTesterMixin` tests for `StableDiffusionPanoramaPipeline`

* Fix variable name typo and update comments

* Update deprecated `output_type="numpy"` to "np" in test files

* Discard changes to src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py

* Update test_stable_diffusion_panorama.py

* Update numbers in README.md

* Update get_guidance_scale_embedding method to use timesteps instead of w

* Update number of checkpoints in README.md

* Add type hints and fix var name

* Fix PyTorch's convention for inplace functions

* Fix a typo

* Revert "Fix PyTorch's convention for inplace functions"

This reverts commit 74350cf65b2c9aa77f08bec7937d7a8b13edb509.

* Fix typos

* Indent

* Refactor get_guidance_scale_embedding method in LEditsPPPipelineStableDiffusionXL class
parent 4974b845
......@@ -74,7 +74,7 @@ class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -113,7 +113,7 @@ class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -153,7 +153,7 @@ class LDMTextToImagePipelineSlowTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -189,7 +189,7 @@ class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -84,7 +84,7 @@ class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
init_image = self.dummy_image.to(device)
generator = torch.Generator(device=device).manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="numpy").images
image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
......@@ -109,7 +109,7 @@ class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
init_image = self.dummy_image.to(torch_device)
image = ldm(init_image, num_inference_steps=2, output_type="numpy").images
image = ldm(init_image, num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
......@@ -128,7 +128,7 @@ class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
ldm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="numpy").images
image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
......
......@@ -117,7 +117,7 @@ class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -49,10 +49,10 @@ class PNDMPipelineFastTests(unittest.TestCase):
pndm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
image = pndm(generator=generator, num_inference_steps=20, output_type="np").images
generator = torch.manual_seed(0)
image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="np", return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
......@@ -77,7 +77,7 @@ class PNDMPipelineIntegrationTests(unittest.TestCase):
pndm.to(torch_device)
pndm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = pndm(generator=generator, output_type="numpy").images
image = pndm(generator=generator, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
......
......@@ -46,7 +46,7 @@ class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.Tes
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -55,7 +55,7 @@ class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unitt
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -55,7 +55,7 @@ class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unitt
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -775,7 +775,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -950,7 +950,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
generator=generator,
guidance_scale=7.5,
num_inference_steps=2,
output_type="numpy",
output_type="np",
)
image_chunked = output_chunked.images
......@@ -966,7 +966,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
generator=generator,
guidance_scale=7.5,
num_inference_steps=2,
output_type="numpy",
output_type="np",
)
image = output.images
......
......@@ -179,7 +179,7 @@ class StableDiffusionImg2ImgPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -199,7 +199,7 @@ class StableDiffusionInpaintPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -470,7 +470,7 @@ class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipeli
"generator": [generator1, generator2],
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -586,7 +586,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -847,7 +847,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.Te
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -1072,7 +1072,7 @@ class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -131,7 +131,7 @@ class StableDiffusionInstructPix2PixPipelineFastTests(
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -288,7 +288,7 @@ class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -151,7 +151,7 @@ class StableDiffusion2PipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -336,7 +336,7 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -557,7 +557,7 @@ class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -138,7 +138,7 @@ class StableDiffusionAttendAndExcitePipelineFastTests(
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
......@@ -225,7 +225,7 @@ class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
generator=generator,
num_inference_steps=5,
max_iter_to_alter=5,
output_type="numpy",
output_type="np",
).images[0]
expected_image = load_numpy(
......
......@@ -174,7 +174,7 @@ class StableDiffusionDepth2ImgPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -395,7 +395,7 @@ class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -534,7 +534,7 @@ class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -143,7 +143,7 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -165,7 +165,7 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -186,7 +186,7 @@ class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, Pipeli
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......@@ -417,7 +417,7 @@ class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase):
negative_prompt=source_prompt,
inpaint_strength=0.7,
num_inference_steps=25,
output_type="numpy",
output_type="np",
).images[0]
expected_image = (
......
......@@ -129,7 +129,7 @@ class StableDiffusion2InpaintPipelineFastTests(
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -155,7 +155,7 @@ class StableDiffusionLatentUpscalePipelineFastTests(
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
......@@ -308,7 +308,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe([prompt], generator=generator, num_inference_steps=5, output_type="numpy")
output = sd_pipe([prompt], generator=generator, num_inference_steps=5, output_type="np")
image = output.images
image_slice = image[0, 253:256, 253:256, -1]
......@@ -335,7 +335,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
prompt = "a photograph of an astronaut riding a horse"
generator = torch.manual_seed(0)
image = sd_pipe(
[prompt], generator=generator, guidance_scale=7.5, num_inference_steps=5, output_type="numpy"
[prompt], generator=generator, guidance_scale=7.5, num_inference_steps=5, output_type="np"
).images
image_slice = image[0, 253:256, 253:256, -1]
......@@ -357,7 +357,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
pipe.enable_attention_slicing()
generator = torch.manual_seed(0)
output_chunked = pipe(
[prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
[prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="np"
)
image_chunked = output_chunked.images
......@@ -369,7 +369,7 @@ class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase):
# disable slicing
pipe.disable_attention_slicing()
generator = torch.manual_seed(0)
output = pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy")
output = pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="np")
image = output.images
# make sure that more than 3.0 GB is allocated
......
......@@ -246,7 +246,7 @@ class AdapterTests:
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"output_type": "np",
}
return inputs
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment