return"Step that sets the scheduler's timesteps for inference"
@property
def inputs(self) -> List[InputParam]:
    """User-facing inputs consumed by this timestep-setting step.

    Returns:
        List[InputParam]: the input parameters, with defaults where a value
        is optional (`height`/`width` carry only a type hint and no default).
    """
    return [
        InputParam("num_inference_steps", default=50),
        InputParam("timesteps"),
        InputParam("sigmas"),
        InputParam("strength", default=0.6),
        InputParam("guidance_scale", default=3.5),
        InputParam("num_images_per_prompt", default=1),
        InputParam("height", type_hint=int),
        InputParam("width", type_hint=int),
    ]
@property
def intermediate_inputs(self) -> List[InputParam]:
    """Inputs produced by earlier steps that this step depends on.

    Returns:
        List[InputParam]: a single required `batch_size` entry.

    Note: the original annotation was `List[str]`, but the body returns
    `InputParam` objects (matching the sibling `inputs` /
    `intermediate_outputs` properties), so the annotation is corrected here.
    """
    return [
        InputParam(
            "batch_size",
            required=True,
            type_hint=int,
            description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`. Can be generated in input step.",
        ),
    ]
@property
def intermediate_outputs(self) -> List[OutputParam]:
    """Values this step writes to the pipeline state for later steps.

    Returns:
        List[OutputParam]: timesteps, the (possibly adjusted) number of
        inference steps, the initial-noise timestep for img2img, and an
        optional guidance tensor.
    """
    return [
        OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"),
        OutputParam(
            "num_inference_steps",
            type_hint=int,
            description="The number of denoising steps to perform at inference time",
        ),
        OutputParam(
            "latent_timestep",
            type_hint=torch.Tensor,
            description="The timestep that represents the initial noise level for image-to-image generation",
        ),
        OutputParam("guidance", type_hint=torch.Tensor, description="Optional guidance to be used."),
    ]
@staticmethod
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps with self.scheduler->scheduler
InputParam("dtype",type_hint=torch.dtype,description="Data type of model tensor inputs"),
InputParam(
"preprocess_kwargs",
type_hint=Optional[dict],
description="A kwargs dictionary that if specified is passed along to the `ImageProcessor` as defined under `self.image_processor` in [diffusers.image_processor.VaeImageProcessor]",
),
]
@property
def intermediate_outputs(self) -> List[OutputParam]:
    """Values this step writes to the pipeline state for later steps.

    Returns:
        List[OutputParam]: a single `image_latents` tensor entry.
    """
    return [
        OutputParam(
            "image_latents",
            type_hint=torch.Tensor,
            description="The latents representing the reference image for image-to-image/inpainting generation",
        )
    ]
@staticmethod
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_inpaint.StableDiffusion3InpaintPipeline._encode_vae_image with self.vae->vae