Unverified Commit 63dd6017 authored by David El Malih, committed by GitHub

Improve docstrings and type hints in scheduling_euler_discrete.py (#12654)

* refactor: enhance type hints and documentation in EulerDiscreteScheduler

Updated type hints for function parameters and return types in the EulerDiscreteScheduler class to improve code clarity and maintainability. Enhanced docstrings for several methods to provide clearer descriptions of their functionality and expected arguments. This includes specifying Literal types for certain parameters and ensuring consistent return type annotations across the class.

* refactor: enhance type hints and documentation across multiple schedulers

Updated type hints and improved docstrings in various scheduler classes, including CMStochasticIterativeScheduler, CosineDPMSolverMultistepScheduler, and others. This includes specifying parameter types, return types, and providing clearer descriptions of method functionality. Notable changes include documenting the default value of the begin_index argument and adding explanations for the noise-addition methods. These improvements aim to enhance code clarity and maintainability across the scheduling module.

* refactor: update docstrings to clarify noise schedule construction

Revised docstrings across multiple scheduler classes to enhance clarity regarding the construction of noise schedules. Updated references to relevant papers, ensuring accurate citations for the methodologies used. This includes changes in DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, and others, improving documentation consistency and readability.
parent eeae0338
@@ -121,7 +121,7 @@ class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -287,7 +287,23 @@ class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin):
         return c_skip, c_out
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
-    def index_for_timestep(self, timestep, schedule_timesteps=None):
+    def index_for_timestep(
+        self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
+    ) -> int:
+        """
+        Find the index of a given timestep in the timestep schedule.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The timestep value to find in the schedule.
+            schedule_timesteps (`torch.Tensor`, *optional*):
+                The timestep schedule to search in. If `None`, uses `self.timesteps`.
+
+        Returns:
+            `int`:
+                The index of the timestep in the schedule. For the very first step, returns the second index if
+                multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
+        """
         if schedule_timesteps is None:
             schedule_timesteps = self.timesteps
 
@@ -302,7 +318,14 @@ class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin):
         return indices[pos].item()
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
-    def _init_step_index(self, timestep):
+    def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
+        """
+        Initialize the step index for the scheduler based on the given timestep.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The current timestep to initialize the step index from.
+        """
         if self.begin_index is None:
             if isinstance(timestep, torch.Tensor):
                 timestep = timestep.to(self.timesteps.device)
@@ -410,6 +433,21 @@ class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin):
         noise: torch.Tensor,
         timesteps: torch.Tensor,
     ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
         # Make sure sigmas and timesteps have the same device and dtype as original_samples
         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
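As a minimal standalone illustration of the lookup behavior the new `index_for_timestep` docstring describes (the schedule values below are invented, and the scheduler's `begin_index` fast path is omitted):

import torch

def index_for_timestep(timestep, schedule_timesteps):
    # All positions where this timestep occurs in the schedule.
    indices = (schedule_timesteps == timestep).nonzero()
    # On the very first step, prefer the second match (if one exists) so that
    # starting mid-schedule (e.g. image-to-image) does not skip a sigma.
    pos = 1 if len(indices) > 1 else 0
    return indices[pos].item()

schedule = torch.tensor([999, 749, 749, 499, 249])
print(index_for_timestep(749, schedule))  # 2, the second of the two matches
print(index_for_timestep(499, schedule))  # 3, the single match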
@@ -137,7 +137,7 @@ class CosineDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -266,6 +266,19 @@ class CosineDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -537,6 +550,21 @@ class CosineDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         noise: torch.Tensor,
         timesteps: torch.Tensor,
     ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
         # Make sure sigmas and timesteps have the same device and dtype as original_samples
         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
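A NumPy sketch of the log-space interpolation the `_sigma_to_t` docstring summarizes, following the function body shown in the hunk above; the schedule values are invented, and `log_sigmas` is assumed ascending, as produced from an ascending sigma schedule:

import numpy as np

def sigma_to_t(sigma, log_sigmas):
    log_sigma = np.log(np.maximum(sigma, 1e-10))
    # Signed distance from the query to every log-sigma in the schedule.
    dists = log_sigma - log_sigmas[:, np.newaxis]
    # Index of the last schedule entry still <= the query, clipped so high_idx stays valid.
    low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    # Linear interpolation weight between the two neighboring timesteps.
    w = np.clip((low - log_sigma) / (low - high), 0, 1)
    return ((1 - w) * low_idx + w * high_idx).reshape(sigma.shape)

log_sigmas = np.log(np.array([0.1, 1.0, 10.0, 80.0]))  # ascending schedule
print(sigma_to_t(np.array([3.0]), log_sigmas))  # ~[1.48], a fractional timestep between indices 1 and 2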
@@ -99,10 +99,11 @@ def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
...
@@ -98,10 +98,11 @@ def rescale_zero_terminal_snr(betas):
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
...
@@ -100,10 +100,11 @@ def rescale_zero_terminal_snr(betas):
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
...
@@ -97,10 +97,11 @@ def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
...
@@ -99,10 +99,11 @@ def rescale_zero_terminal_snr(betas):
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
...
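These five hunks touch the same copied helper, of which the diff shows only the first line. For context, a sketch of the full rescaling (Algorithm 1 of "Common Diffusion Noise Schedules and Sample Steps are Flawed", https://huggingface.co/papers/2305.08891), with a toy beta schedule in place of the schedulers' defaults:

import torch

def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Shift so the last cumulative alpha is zero, then rescale so the first stays unchanged.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
    alphas_bar_sqrt -= alphas_bar_sqrt_T
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert the rescaled cumulative product back to per-step betas.
    alphas_bar = alphas_bar_sqrt**2
    alphas = alphas_bar[1:] / alphas_bar[:-1]
    alphas = torch.cat([alphas_bar[0:1], alphas])
    return 1 - alphas

betas = torch.linspace(1e-4, 0.02, 10)
print(rescale_zero_terminal_snr(betas)[-1])  # tensor(1.): the last beta is 1, i.e. zero terminal SNR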
@@ -230,7 +230,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -364,6 +364,19 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -399,7 +412,20 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
-        """Constructs the noise schedule of Karras et al. (2022)."""
+        """
+        Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
+        Models](https://huggingface.co/papers/2206.00364).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following the Karras noise schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -425,7 +451,19 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
     def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
        # TODO: Add this logic to the other schedulers
@@ -449,7 +487,24 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
     def _convert_to_beta(
         self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
     ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
...
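A sketch of the rho-schedule the rewritten `_convert_to_karras` docstring now cites, interpolating in sigma^(1/rho) space with the paper's suggested rho = 7.0; the helper name and sigma bounds are ours for illustration:

import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_inference_steps: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, num_inference_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # Interpolate linearly in sigma^(1/rho) space, then map back.
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 80.0, 5))  # descends from 80.0 to 0.1, densest near sigma_min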
@@ -83,10 +83,11 @@ def rescale_zero_terminal_snr(betas):
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
@@ -323,7 +324,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -503,6 +504,19 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -537,7 +551,20 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
-        """Constructs the noise schedule of Karras et al. (2022)."""
+        """
+        Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
+        Models](https://huggingface.co/papers/2206.00364).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following the Karras noise schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -576,7 +603,19 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
     def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -600,7 +639,24 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     def _convert_to_beta(
         self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
     ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
...
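For the exponential variant documented in these hunks, the whole construction is one linspace in log space; a sketch with illustrative bounds and a helper name of our choosing:

import math
import numpy as np

def exponential_sigmas(sigma_min: float, sigma_max: float, num_inference_steps: int) -> np.ndarray:
    # Uniform spacing in log-sigma space, i.e. a geometric progression of sigmas.
    return np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))

print(exponential_sigmas(0.1, 80.0, 5))  # geometric spacing from 80.0 down to 0.1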
@@ -376,6 +376,19 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -411,7 +424,20 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
-        """Constructs the noise schedule of Karras et al. (2022)."""
+        """
+        Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
+        Models](https://huggingface.co/papers/2206.00364).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following the Karras noise schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -437,7 +463,19 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
     def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -461,7 +499,24 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
     def _convert_to_beta(
         self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
     ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
...
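A sketch of the beta schedule described by the new `_convert_to_beta` docstring: sigma positions come from the inverse CDF (ppf) of a Beta(alpha, beta) distribution over evenly spaced quantiles. scipy is assumed available; the helper name and bounds are illustrative:

import numpy as np
import scipy.stats

def beta_sigmas(sigma_min: float, sigma_max: float, num_inference_steps: int,
                alpha: float = 0.6, beta: float = 0.6) -> np.ndarray:
    quantiles = 1 - np.linspace(0, 1, num_inference_steps)  # descending in time
    # Inverse CDF of the beta distribution maps quantiles to [0, 1] positions.
    ppf = scipy.stats.beta.ppf(quantiles, alpha, beta)
    return sigma_min + ppf * (sigma_max - sigma_min)

print(beta_sigmas(0.1, 80.0, 5))  # clustered near both sigma extremes for alpha = beta = 0.6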
@@ -251,7 +251,23 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
         self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
-    def index_for_timestep(self, timestep, schedule_timesteps=None):
+    def index_for_timestep(
+        self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
+    ) -> int:
+        """
+        Find the index of a given timestep in the timestep schedule.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The timestep value to find in the schedule.
+            schedule_timesteps (`torch.Tensor`, *optional*):
+                The timestep schedule to search in. If `None`, uses `self.timesteps`.
+
+        Returns:
+            `int`:
+                The index of the timestep in the schedule. For the very first step, returns the second index if
+                multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
+        """
         if schedule_timesteps is None:
             schedule_timesteps = self.timesteps
 
@@ -266,7 +282,14 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
         return indices[pos].item()
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
-    def _init_step_index(self, timestep):
+    def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
+        """
+        Initialize the step index for the scheduler based on the given timestep.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The current timestep to initialize the step index from.
+        """
         if self.begin_index is None:
             if isinstance(timestep, torch.Tensor):
                 timestep = timestep.to(self.timesteps.device)
@@ -302,7 +325,7 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -430,6 +453,19 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -468,7 +504,19 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
     def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -492,7 +540,24 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
     def _convert_to_beta(
         self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
     ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -646,6 +711,21 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
         noise: torch.Tensor,
         timesteps: torch.Tensor,
     ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
         # Make sure sigmas and timesteps have the same device and dtype as original_samples
         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
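A condensed sketch of the sigma-based noising that these `add_noise` docstrings describe: look up each timestep's sigma, broadcast it over the sample dimensions, and add the scaled noise. Shapes and schedule values are invented, and the device/mps handling and `begin_index` path are omitted:

import torch

def add_noise(original_samples, noise, timesteps, schedule_timesteps, sigmas):
    # Look up the sigma index for each requested timestep.
    step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
    sigma = sigmas[step_indices].flatten()
    while len(sigma.shape) < len(original_samples.shape):
        sigma = sigma.unsqueeze(-1)  # broadcast over the sample dimensions
    return original_samples + noise * sigma

schedule_timesteps = torch.tensor([999, 749, 499, 249])
sigmas = torch.tensor([80.0, 10.0, 1.0, 0.1])
x = torch.zeros(2, 3, 4, 4)
noisy = add_noise(x, torch.randn_like(x), torch.tensor([749, 249]), schedule_timesteps, sigmas)
print(noisy.shape)  # torch.Size([2, 3, 4, 4])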
@@ -295,7 +295,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -454,6 +454,19 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -489,7 +502,20 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
-        """Constructs the noise schedule of Karras et al. (2022)."""
+        """
+        Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
+        Models](https://huggingface.co/papers/2206.00364).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following the Karras noise schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -515,7 +541,19 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
     def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -539,7 +577,24 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
     def _convert_to_beta(
         self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
     ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
...
@@ -169,7 +169,7 @@ class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -342,6 +342,19 @@ class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
     def _sigma_to_t(self, sigma, log_sigmas):
+        """
+        Convert sigma values to corresponding timestep values through interpolation.
+
+        Args:
+            sigma (`np.ndarray`):
+                The sigma value(s) to convert to timestep(s).
+            log_sigmas (`np.ndarray`):
+                The logarithm of the sigma schedule used for interpolation.
+
+        Returns:
+            `np.ndarray`:
+                The interpolated timestep value(s) corresponding to the input sigma(s).
+        """
         # get log sigma
         log_sigma = np.log(np.maximum(sigma, 1e-10))
@@ -682,6 +695,21 @@ class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         noise: torch.Tensor,
         timesteps: torch.Tensor,
     ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
         # Make sure sigmas and timesteps have the same device and dtype as original_samples
         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
@@ -155,7 +155,7 @@ class EDMEulerScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -284,7 +284,23 @@ class EDMEulerScheduler(SchedulerMixin, ConfigMixin):
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
-    def index_for_timestep(self, timestep, schedule_timesteps=None):
+    def index_for_timestep(
+        self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
+    ) -> int:
+        """
+        Find the index of a given timestep in the timestep schedule.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The timestep value to find in the schedule.
+            schedule_timesteps (`torch.Tensor`, *optional*):
+                The timestep schedule to search in. If `None`, uses `self.timesteps`.
+
+        Returns:
+            `int`:
+                The index of the timestep in the schedule. For the very first step, returns the second index if
+                multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
+        """
         if schedule_timesteps is None:
             schedule_timesteps = self.timesteps
 
@@ -299,7 +315,14 @@ class EDMEulerScheduler(SchedulerMixin, ConfigMixin):
         return indices[pos].item()
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
-    def _init_step_index(self, timestep):
+    def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
+        """
+        Initialize the step index for the scheduler based on the given timestep.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The current timestep to initialize the step index from.
+        """
         if self.begin_index is None:
             if isinstance(timestep, torch.Tensor):
                 timestep = timestep.to(self.timesteps.device)
@@ -413,6 +436,21 @@ class EDMEulerScheduler(SchedulerMixin, ConfigMixin):
         noise: torch.Tensor,
         timesteps: torch.Tensor,
     ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
         # Make sure sigmas and timesteps have the same device and dtype as original_samples
         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
@@ -100,10 +100,11 @@ def rescale_zero_terminal_snr(betas):
     Args:
         betas (`torch.Tensor`):
-            the betas that the scheduler is being initialized with.
+            The betas that the scheduler is being initialized with.
 
     Returns:
-        `torch.Tensor`: rescaled betas with zero terminal SNR
+        `torch.Tensor`:
+            Rescaled betas with zero terminal SNR.
     """
     # Convert betas to alphas_bar_sqrt
     alphas = 1.0 - betas
@@ -245,7 +246,7 @@ class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -319,7 +320,23 @@ class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
         self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
-    def index_for_timestep(self, timestep, schedule_timesteps=None):
+    def index_for_timestep(
+        self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
+    ) -> int:
+        """
+        Find the index of a given timestep in the timestep schedule.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The timestep value to find in the schedule.
+            schedule_timesteps (`torch.Tensor`, *optional*):
+                The timestep schedule to search in. If `None`, uses `self.timesteps`.
+
+        Returns:
+            `int`:
+                The index of the timestep in the schedule. For the very first step, returns the second index if
+                multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
+        """
         if schedule_timesteps is None:
             schedule_timesteps = self.timesteps
 
@@ -334,7 +351,14 @@ class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
         return indices[pos].item()
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
-    def _init_step_index(self, timestep):
+    def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
+        """
+        Initialize the step index for the scheduler based on the given timestep.
+
+        Args:
+            timestep (`float` or `torch.Tensor`):
+                The current timestep to initialize the step index from.
+        """
         if self.begin_index is None:
             if isinstance(timestep, torch.Tensor):
                 timestep = timestep.to(self.timesteps.device)
@@ -451,6 +475,21 @@ class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
         noise: torch.Tensor,
         timesteps: torch.Tensor,
     ) -> torch.Tensor:
+        """
+        Add noise to the original samples according to the noise schedule at the specified timesteps.
+
+        Args:
+            original_samples (`torch.Tensor`):
+                The original samples to which noise will be added.
+            noise (`torch.Tensor`):
+                The noise tensor to add to the original samples.
+            timesteps (`torch.Tensor`):
+                The timesteps at which to add noise, determining the noise level from the schedule.
+
+        Returns:
+            `torch.Tensor`:
+                The noisy samples with added noise scaled according to the timestep schedule.
+        """
         # Make sure sigmas and timesteps have the same device and dtype as original_samples
         sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
         if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
@@ -160,7 +160,7 @@ class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
         Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
 
         Args:
-            begin_index (`int`):
+            begin_index (`int`, defaults to `0`):
                 The begin index for the scheduler.
         """
         self._begin_index = begin_index
@@ -473,7 +473,20 @@ class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
     def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
-        """Constructs the noise schedule of Karras et al. (2022)."""
+        """
+        Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
+        Models](https://huggingface.co/papers/2206.00364).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following the Karras noise schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -499,7 +512,19 @@ class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
     def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
-        """Constructs an exponential noise schedule."""
+        """
+        Construct an exponential noise schedule.
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following an exponential schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
@@ -523,7 +548,24 @@ class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
     def _convert_to_beta(
         self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
     ) -> torch.Tensor:
-        """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
+        """
+        Construct a beta noise schedule as proposed in [Beta Sampling is All You
+        Need](https://huggingface.co/papers/2407.12173).
+
+        Args:
+            in_sigmas (`torch.Tensor`):
+                The input sigma values to be converted.
+            num_inference_steps (`int`):
+                The number of inference steps to generate the noise schedule for.
+            alpha (`float`, *optional*, defaults to `0.6`):
+                The alpha parameter for the beta distribution.
+            beta (`float`, *optional*, defaults to `0.6`):
+                The beta parameter for the beta distribution.
+
+        Returns:
+            `torch.Tensor`:
+                The converted sigma values following a beta distribution schedule.
+        """
 
         # Hack to make sure that other schedulers which copy this function don't break
         # TODO: Add this logic to the other schedulers
...
...@@ -102,7 +102,7 @@ class FlowMatchHeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
Args:
begin_index (`int`): begin_index (`int`, defaults to `0`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
...
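For context, a hypothetical image-to-image setup showing when a pipeline would call `set_begin_index`; the `strength` arithmetic mirrors common diffusers pipelines but is an assumption here:

```python
from diffusers import FlowMatchHeunDiscreteScheduler

scheduler = FlowMatchHeunDiscreteScheduler()
num_inference_steps = 50
strength = 0.7  # hypothetical img2img denoising strength

# img2img skips the first (1 - strength) fraction of the schedule, so the
# pipeline tells the scheduler up front which index it will start from.
t_start = int(num_inference_steps * (1 - strength))
scheduler.set_begin_index(t_start)
```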
...@@ -168,7 +168,7 @@ class FlowMatchLCMScheduler(SchedulerMixin, ConfigMixin):
Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
Args:
begin_index (`int`): begin_index (`int`, defaults to `0`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
...@@ -473,7 +473,20 @@ class FlowMatchLCMScheduler(SchedulerMixin, ConfigMixin):
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
"""Constructs the noise schedule of Karras et al. (2022).""" """
Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
Models](https://huggingface.co/papers/2206.00364).
Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
Returns:
`torch.Tensor`:
The converted sigma values following the Karras noise schedule.
"""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
...@@ -499,7 +512,19 @@ class FlowMatchLCMScheduler(SchedulerMixin, ConfigMixin):
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
"""Constructs an exponential noise schedule.""" """
Construct an exponential noise schedule.
Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
Returns:
`torch.Tensor`:
The converted sigma values following an exponential schedule.
"""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
...@@ -523,7 +548,24 @@ class FlowMatchLCMScheduler(SchedulerMixin, ConfigMixin):
def _convert_to_beta(
self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
) -> torch.Tensor:
"""From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)""" """
Construct a beta noise schedule as proposed in [Beta Sampling is All You
Need](https://huggingface.co/papers/2407.12173).
Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
alpha (`float`, *optional*, defaults to `0.6`):
The alpha parameter for the beta distribution.
beta (`float`, *optional*, defaults to `0.6`):
The beta parameter for the beta distribution.
Returns:
`torch.Tensor`:
The converted sigma values following a beta distribution schedule.
"""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
...
...@@ -188,7 +188,23 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
def index_for_timestep(self, timestep, schedule_timesteps=None): def index_for_timestep(
self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
) -> int:
"""
Find the index of a given timestep in the timestep schedule.
Args:
timestep (`float` or `torch.Tensor`):
The timestep value to find in the schedule.
schedule_timesteps (`torch.Tensor`, *optional*):
The timestep schedule to search in. If `None`, uses `self.timesteps`.
Returns:
`int`:
The index of the timestep in the schedule. For the very first step, returns the second index if
multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
"""
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
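A standalone sketch of the lookup rule the docstring spells out, illustrative rather than the verbatim method body:

```python
import torch

def index_for_timestep_sketch(timestep: float, schedule_timesteps: torch.Tensor) -> int:
    indices = (schedule_timesteps == timestep).nonzero()
    # Second-order schedulers repeat timesteps, so a timestep can match twice;
    # taking the second match on the first step avoids skipping a sigma when
    # starting mid-schedule (image-to-image).
    pos = 1 if len(indices) > 1 else 0
    return indices[pos].item()

# index_for_timestep_sketch(801.0, torch.tensor([967.0, 801.0, 801.0, 629.0]))  # -> 2
```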
...@@ -230,7 +246,7 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
Args:
begin_index (`int`): begin_index (`int`, defaults to `0`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
...@@ -355,6 +371,19 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
def _sigma_to_t(self, sigma, log_sigmas): def _sigma_to_t(self, sigma, log_sigmas):
"""
Convert sigma values to corresponding timestep values through interpolation.
Args:
sigma (`np.ndarray`):
The sigma value(s) to convert to timestep(s).
log_sigmas (`np.ndarray`):
The logarithm of the sigma schedule used for interpolation.
Returns:
`np.ndarray`:
The interpolated timestep value(s) corresponding to the input sigma(s).
"""
# get log sigma
log_sigma = np.log(np.maximum(sigma, 1e-10))
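The interpolation that follows is elided by the hunk; a sketch of the piecewise-linear inversion in log-sigma space, assuming `log_sigmas` is increasing with one entry per training timestep:

```python
import numpy as np

def sigma_to_t_sketch(sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
    log_sigma = np.log(np.maximum(sigma, 1e-10))
    # Distance of each query to every schedule entry, shape (num_train_steps, n).
    dists = log_sigma - log_sigmas[:, np.newaxis]
    # Index of the last schedule entry still <= each query value.
    low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    # Fractional position between the two bracketing entries.
    w = np.clip((low - log_sigma) / (low - high), 0, 1)
    return ((1 - w) * low_idx + w * high_idx).reshape(sigma.shape)
```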
...@@ -379,7 +408,20 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
"""Constructs the noise schedule of Karras et al. (2022).""" """
Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
Models](https://huggingface.co/papers/2206.00364).
Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
Returns:
`torch.Tensor`:
The converted sigma values following the Karras noise schedule.
"""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
...@@ -405,7 +447,19 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
"""Constructs an exponential noise schedule.""" """
Construct an exponential noise schedule.
Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
Returns:
`torch.Tensor`:
The converted sigma values following an exponential schedule.
"""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
...@@ -429,7 +483,24 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
def _convert_to_beta(
self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
) -> torch.Tensor:
"""From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)""" """
Construct a beta noise schedule as proposed in [Beta Sampling is All You
Need](https://huggingface.co/papers/2407.12173).
Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
alpha (`float`, *optional*, defaults to `0.6`):
The alpha parameter for the beta distribution.
beta (`float`, *optional*, defaults to `0.6`):
The beta parameter for the beta distribution.
Returns:
`torch.Tensor`:
The converted sigma values following a beta distribution schedule.
"""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
...@@ -462,7 +533,14 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
return self.dt is None
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
def _init_step_index(self, timestep): def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
"""
Initialize the step index for the scheduler based on the given timestep.
Args:
timestep (`float` or `torch.Tensor`):
The current timestep to initialize the step index from.
"""
if self.begin_index is None:
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
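The remainder of the branch is elided; based on the docstring, a sketch of the full logic over an assumed scheduler object:

```python
import torch

def init_step_index_sketch(scheduler, timestep) -> None:
    # Resolve the starting position lazily on the first step() call.
    if scheduler.begin_index is None:
        if isinstance(timestep, torch.Tensor):
            timestep = timestep.to(scheduler.timesteps.device)
        scheduler._step_index = scheduler.index_for_timestep(timestep)
    else:
        # The pipeline pinned the start explicitly via set_begin_index().
        scheduler._step_index = scheduler._begin_index
```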
...@@ -580,6 +658,21 @@ class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
noise: torch.Tensor,
timesteps: torch.Tensor,
) -> torch.Tensor:
"""
Add noise to the original samples according to the noise schedule at the specified timesteps.
Args:
original_samples (`torch.Tensor`):
The original samples to which noise will be added.
noise (`torch.Tensor`):
The noise tensor to add to the original samples.
timesteps (`torch.Tensor`):
The timesteps at which to add noise, determining the noise level from the schedule.
Returns:
`torch.Tensor`:
The noisy samples with added noise scaled according to the timestep schedule.
"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
...
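The gather-and-broadcast body is elided after this point. Under this scheduler family's sigma parameterization the rule is x_noisy = x_0 + sigma_t * noise; a sketch assuming the per-sample schedule indices have already been resolved:

```python
import torch

def add_noise_sketch(
    original_samples: torch.Tensor, noise: torch.Tensor, sigmas: torch.Tensor, step_indices: torch.Tensor
) -> torch.Tensor:
    # One sigma per sample, broadcast over the trailing (channel/spatial) dims.
    sigma = sigmas[step_indices].flatten()
    while len(sigma.shape) < len(original_samples.shape):
        sigma = sigma.unsqueeze(-1)
    # x_noisy = x_0 + sigma_t * eps
    return original_samples + noise * sigma
```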