Unverified Commit 5d848ec0 authored by M. Tolga Cangöz, committed by GitHub

[`Tests`] Update a deprecated parameter in test files and fix several typos (#7277)

* Add properties and `IPAdapterTesterMixin` tests for `StableDiffusionPanoramaPipeline`

* Fix variable name typo and update comments

* Update deprecated `output_type="numpy"` to "np" in test files

* Discard changes to src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py

* Update test_stable_diffusion_panorama.py

* Update numbers in README.md

* Update get_guidance_scale_embedding method to use timesteps instead of w

* Update number of checkpoints in README.md

* Add type hints and fix var name

* Fix PyTorch's convention for inplace functions

* Fix a typo

* Revert "Fix PyTorch's convention for inplace functions"

This reverts commit 74350cf65b2c9aa77f08bec7937d7a8b13edb509.

* Fix typos

* Indent

* Refactor get_guidance_scale_embedding method in LEditsPPPipelineStableDiffusionXL class
parent 4974b845
@@ -632,7 +632,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, Textua
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0

         # and `sag_scale` is` `s` of equation (16)
-        # of the self-attentnion guidance paper: https://arxiv.org/pdf/2210.00939.pdf
+        # of the self-attention guidance paper: https://arxiv.org/pdf/2210.00939.pdf
         # `sag_scale = 0` means no self-attention guidance
         do_self_attention_guidance = sag_scale > 0.0
@@ -667,7 +667,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, Textua
        if timesteps.dtype not in [torch.int16, torch.int32, torch.int64]:
            raise ValueError(
-                f"{self.__class__.__name__} does not support using a scheduler of type {self.scheduler.__class__.__name__}. Please make sure to use one of 'DDIMScheduler, PNDMScheduler, DDPMScheduler, DEISMultistepScheduler, UniPCMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinlgestepScheduler'."
+                f"{self.__class__.__name__} does not support using a scheduler of type {self.scheduler.__class__.__name__}. Please make sure to use one of 'DDIMScheduler, PNDMScheduler, DDPMScheduler, DEISMultistepScheduler, UniPCMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler'."
            )

        # 5. Prepare latent variables
@@ -723,7 +723,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, Textua
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

-                # perform self-attention guidance with the stored self-attentnion map
+                # perform self-attention guidance with the stored self-attention map
                if do_self_attention_guidance:
                    # classifier-free guidance produces two chunks of attention map
                    # and we only use unconditional one according to equation (25)
...
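For context on the comments the hunks above correct: classifier-free guidance and self-attention guidance enter the denoising step as two separate correction terms, which the `@@ -723` hunk shows being combined. A minimal sketch of that combination, with `degraded_pred` standing in for the pipeline's prediction on the SAG-degraded latents (the function and argument names here are illustrative, not the pipeline's exact internals):

```python
import torch

def combine_guidance(
    noise_pred_uncond: torch.Tensor,
    noise_pred_text: torch.Tensor,
    degraded_pred: torch.Tensor,
    guidance_scale: float = 7.5,
    sag_scale: float = 0.75,
) -> torch.Tensor:
    # Classifier-free guidance: move from the unconditional prediction
    # toward the text-conditioned one (active when guidance_scale > 1.0).
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    # Self-attention guidance, `s` in eq. (16) of https://arxiv.org/pdf/2210.00939.pdf:
    # additionally push away from the prediction on degraded latents.
    if sag_scale > 0.0:
        noise_pred = noise_pred + sag_scale * (noise_pred_uncond - degraded_pred)
    return noise_pred
```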
@@ -740,20 +740,22 @@ class StableDiffusionXLPipeline(
             self.vae.decoder.mid_block.to(dtype)

     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.FloatTensor:
         """
         See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

         Args:
-            timesteps (`torch.Tensor`):
-                generate embedding vectors at these timesteps
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
             embedding_dim (`int`, *optional*, defaults to 512):
-                dimension of the embeddings to generate
-            dtype:
-                data type of the generated embeddings
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.

         Returns:
-            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
+            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
         """
         assert len(w.shape) == 1
         w = w * 1000.0
...
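The diff truncates the method just after `w = w * 1000.0`. For reference, the body continues with a standard sinusoidal embedding along the lines of the VDM code linked in the docstring; a self-contained sketch (the repository's exact body may differ in minor details):

```python
import torch

def guidance_scale_embedding(
    w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
) -> torch.Tensor:
    assert len(w.shape) == 1
    w = w * 1000.0  # rescale so the guidance scale spans a timestep-like range

    # Sinusoidal embedding: half sine and half cosine frequencies.
    half_dim = embedding_dim // 2
    emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
    emb = w.to(dtype)[:, None] * emb[None, :]  # shape (len(w), half_dim)
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:
        emb = torch.nn.functional.pad(emb, (0, 1))  # zero-pad odd dimensions
    assert emb.shape == (w.shape[0], embedding_dim)
    return emb
```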
@@ -874,20 +874,22 @@ class StableDiffusionXLImg2ImgPipeline(
             self.vae.decoder.mid_block.to(dtype)

     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.FloatTensor:
         """
         See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

         Args:
-            timesteps (`torch.Tensor`):
-                generate embedding vectors at these timesteps
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
             embedding_dim (`int`, *optional*, defaults to 512):
-                dimension of the embeddings to generate
-            dtype:
-                data type of the generated embeddings
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.

         Returns:
-            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
+            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
         """
         assert len(w.shape) == 1
         w = w * 1000.0
...
@@ -1110,20 +1110,22 @@ class StableDiffusionXLInpaintPipeline(
             self.vae.decoder.mid_block.to(dtype)

     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.FloatTensor:
         """
         See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

         Args:
-            timesteps (`torch.Tensor`):
-                generate embedding vectors at these timesteps
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
             embedding_dim (`int`, *optional*, defaults to 512):
-                dimension of the embeddings to generate
-            dtype:
-                data type of the generated embeddings
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.

         Returns:
-            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
+            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
         """
         assert len(w.shape) == 1
         w = w * 1000.0
...
@@ -613,20 +613,22 @@ class StableDiffusionAdapterPipeline(DiffusionPipeline, StableDiffusionMixin):
         return height, width

     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.FloatTensor:
         """
         See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

         Args:
-            timesteps (`torch.Tensor`):
-                generate embedding vectors at these timesteps
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
             embedding_dim (`int`, *optional*, defaults to 512):
-                dimension of the embeddings to generate
-            dtype:
-                data type of the generated embeddings
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.

         Returns:
-            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
+            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
         """
         assert len(w.shape) == 1
         w = w * 1000.0
...
@@ -784,20 +784,22 @@ class StableDiffusionXLAdapterPipeline(
         return height, width

     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.FloatTensor:
         """
         See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

         Args:
-            timesteps (`torch.Tensor`):
-                generate embedding vectors at these timesteps
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
             embedding_dim (`int`, *optional*, defaults to 512):
-                dimension of the embeddings to generate
-            dtype:
-                data type of the generated embeddings
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.

         Returns:
-            `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
+            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
         """
         assert len(w.shape) == 1
         w = w * 1000.0
...
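The identical signature-and-docstring update lands in all five pipelines above because each copy is kept in sync with the LCM pipeline through the `# Copied from` mechanism. A hedged sketch of how `w` is typically built before the call in that pipeline, reusing the standalone `guidance_scale_embedding` sketch from earlier (the embedding dimension here is illustrative; the real call reads it from `unet.config.time_cond_proj_dim`):

```python
import torch

guidance_scale = 7.5
batch_size = 2

# LCM embeds (guidance_scale - 1), one entry per sample in the batch.
w = torch.full((batch_size,), guidance_scale - 1.0)
timestep_cond = guidance_scale_embedding(w, embedding_dim=256)
print(timestep_cond.shape)  # torch.Size([2, 256])
```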
@@ -575,8 +575,8 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualIn
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`.
-            output_type (`str`, *optional*, defaults to `"numpy"`):
-                The output format of the generated video. Choose between `"latent"` and `"numpy"`.
+            output_type (`str`, *optional*, defaults to `"np"`):
+                The output format of the generated video. Choose between `"latent"` and `"np"`.
             return_dict (`bool`, *optional*, defaults to `True`):
                 Whether or not to return a
                 [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of
...
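To make the corrected default concrete, a minimal call sketch, assuming `pipe` is an already-loaded `TextToVideoZeroPipeline`:

```python
# With output_type="np" (the documented default), `.images` holds the decoded
# video frames as a NumPy array; output_type="latent" returns undecoded latents.
result = pipe(prompt="a panda surfing", output_type="np")
frames = result.images
```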
@@ -211,7 +211,7 @@ class ControlNetPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": image,
         }
@@ -402,7 +402,7 @@ class StableDiffusionMultiControlNetPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": images,
         }
@@ -602,7 +602,7 @@ class StableDiffusionMultiControlNetOneModelPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": images,
         }
...
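These dicts come from each test class's `get_dummy_inputs` helper. Inside a test method, they are consumed roughly as follows (a sketch of the common diffusers fast-test pattern, not a quote from these files):

```python
# Sketch of the usual fast-test flow around the inputs dicts above.
pipe = self.pipeline_class(**self.get_dummy_components())
pipe.to(device)
pipe.set_progress_bar_config(disable=None)

inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images          # with "np", a NumPy array (B, H, W, C)
image_slice = image[0, -3:, -3:, -1]   # compared against a stored expected slice
```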
@@ -164,7 +164,7 @@ class ControlNetImg2ImgPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": image,
             "control_image": control_image,
         }
@@ -313,7 +313,7 @@ class StableDiffusionMultiControlNetPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": image,
             "control_image": control_image,
         }
...
@@ -155,7 +155,7 @@ class ControlNetInpaintPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": image,
             "mask_image": mask_image,
             "control_image": control_image,
@@ -375,7 +375,7 @@ class MultiControlNetInpaintPipelineFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": image,
             "mask_image": mask_image,
             "control_image": control_image,
...
@@ -172,7 +172,7 @@ class ControlNetPipelineSDXLFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": init_image,
             "mask_image": mask_image,
             "control_image": control_image,
...
@@ -163,7 +163,7 @@ class ControlNetPipelineSDXLImg2ImgFastTests(
             "generator": generator,
             "num_inference_steps": 2,
             "guidance_scale": 6.0,
-            "output_type": "numpy",
+            "output_type": "np",
             "image": image,
             "control_image": image,
         }
...
@@ -63,7 +63,7 @@ class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             "batch_size": 1,
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
@@ -113,7 +113,7 @@ class DDIMPipelineIntegrationTests(unittest.TestCase):
         ddim.set_progress_bar_config(disable=None)

         generator = torch.manual_seed(0)
-        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
+        image = ddim(generator=generator, eta=0.0, output_type="np").images

         image_slice = image[0, -3:, -3:, -1]
@@ -133,7 +133,7 @@ class DDIMPipelineIntegrationTests(unittest.TestCase):
         ddpm.set_progress_bar_config(disable=None)

         generator = torch.manual_seed(0)
-        image = ddpm(generator=generator, output_type="numpy").images
+        image = ddpm(generator=generator, output_type="np").images

         image_slice = image[0, -3:, -3:, -1]
...
@@ -50,10 +50,10 @@ class DDPMPipelineFastTests(unittest.TestCase):
         ddpm.set_progress_bar_config(disable=None)

         generator = torch.Generator(device=device).manual_seed(0)
-        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images
+        image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images

         generator = torch.Generator(device=device).manual_seed(0)
-        image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
+        image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="np", return_dict=False)[0]

         image_slice = image[0, -3:, -3:, -1]
         image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
@@ -75,10 +75,10 @@ class DDPMPipelineFastTests(unittest.TestCase):
         ddpm.set_progress_bar_config(disable=None)

         generator = torch.manual_seed(0)
-        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images
+        image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images

         generator = torch.manual_seed(0)
-        image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="numpy")[0]
+        image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="np")[0]

         image_slice = image[0, -3:, -3:, -1]
         image_eps_slice = image_eps[0, -3:, -3:, -1]
@@ -102,7 +102,7 @@ class DDPMPipelineIntegrationTests(unittest.TestCase):
         ddpm.set_progress_bar_config(disable=None)

         generator = torch.manual_seed(0)
-        image = ddpm(generator=generator, output_type="numpy").images
+        image = ddpm(generator=generator, output_type="np").images

         image_slice = image[0, -3:, -3:, -1]
...
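Past the truncation, these tests typically end by asserting that the two call styles produce (near-)identical slices; a representative continuation, where the shape and tolerance are assumed fast-test values rather than quotes from the file:

```python
import numpy as np

assert image.shape == (1, 32, 32, 3)  # (batch, height, width, channels), assumed size
assert np.abs(image_slice.flatten() - image_from_tuple_slice.flatten()).max() < 1e-2
```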
@@ -50,7 +50,7 @@ class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.T
             "prompt": "A painting of a squirrel eating a burger",
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
...
@@ -55,7 +55,7 @@ class IFImg2ImgPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, uni
             "image": image,
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
...
@@ -57,7 +57,7 @@ class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineT
             "original_image": original_image,
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
...
@@ -57,7 +57,7 @@ class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin,
             "mask_image": mask_image,
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
...
@@ -59,7 +59,7 @@ class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipeli
             "mask_image": mask_image,
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
...
@@ -52,7 +52,7 @@ class IFSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMi
             "image": image,
             "generator": generator,
             "num_inference_steps": 2,
-            "output_type": "numpy",
+            "output_type": "np",
         }
         return inputs
...