# Copyright 2025 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, logging
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerAncestralDiscrete
class EulerAncestralDiscreteSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function.

    Args:
        prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.Tensor
    pred_original_sample: Optional[torch.Tensor] = None


# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.


    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.
        alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
                     Choose from `cosine` or `exp`

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
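        # beta_t = 1 - alpha_bar(t2) / alpha_bar(t1) is the per-step beta implied by the
        # continuous alpha_bar curve, clipped at max_beta to avoid singularities.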
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
def rescale_zero_terminal_snr(betas):
    """
    Rescales betas to have zero terminal SNR, based on https://huggingface.co/papers/2305.08891 (Algorithm 1).

    Args:
        betas (`torch.Tensor`):
            the betas that the scheduler is being initialized with.

    Returns:
        `torch.Tensor`: rescaled betas with zero terminal SNR
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= alphas_bar_sqrt_T

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt to betas
    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
    alphas = alphas_bar[1:] / alphas_bar[:-1]  # Revert cumprod
    alphas = torch.cat([alphas_bar[0:1], alphas])
    betas = 1 - alphas
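
    # The rescaled schedule ends with betas[-1] == 1, so alphas_cumprod[-1] == 0 (zero
    # terminal SNR); __init__ later clamps that last value to 2**-24 to keep sigma finite.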

    return betas


class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Ancestral sampling with Euler method steps.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        beta_start (`float`, defaults to 0.0001):
            The starting `beta` value of inference.
        beta_end (`float`, defaults to 0.02):
            The final `beta` value.
        beta_schedule (`str`, defaults to `"linear"`):
            The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear` or `scaled_linear`.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
        prediction_type (`str`, defaults to `epsilon`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
            `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
            Video](https://imagen.research.google/video/paper.pdf) paper).
        timestep_spacing (`str`, defaults to `"linspace"`):
            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        steps_offset (`int`, defaults to 0):
            An offset added to the inference steps, as required by some model families.
        rescale_betas_zero_snr (`bool`, defaults to `False`):
            Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
            dark samples instead of limiting it to samples with medium brightness. Loosely related to
            [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
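
    Example:
        A minimal usage sketch (the checkpoint name is illustrative; any Stable Diffusion
        checkpoint works the same way):

        ```py
        >>> from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionPipeline

        >>> pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        >>> pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        >>> image = pipe("a photo of an astronaut riding a horse").images[0]
        ```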
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
        rescale_betas_zero_snr: bool = False,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        if rescale_betas_zero_snr:
            self.betas = rescale_zero_terminal_snr(self.betas)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        if rescale_betas_zero_snr:
            # Close to 0 without being 0 so first sigma is not inf
            # FP16 smallest positive subnormal works well here
            self.alphas_cumprod[-1] = 2**-24

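        # sigma_t = sqrt((1 - alpha_bar_t) / alpha_bar_t) maps the DDPM cumulative schedule
        # to the k-diffusion noise scale; sigmas are stored high-to-low with a final
        # sigma = 0 appended so the last step returns the clean sample.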
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
        self.sigmas = torch.from_numpy(sigmas)

        # setable values
        self.num_inference_steps = None
        timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps)
        self.is_scale_input_called = False

        self._step_index = None
        self._begin_index = None
        self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
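        # With "linspace"/"trailing" spacing the schedule starts at sigma_max, so initial
        # latents are drawn with std sigma_max; the sqrt(sigma_max**2 + 1) branch matches
        # the scaling historically used with "leading" spacing.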
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    @property
    def step_index(self):
        """
        The index counter for the current timestep. It increases by 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm.

        Args:
            sample (`torch.Tensor`):
                The input sample.
            timestep (`float` or `torch.Tensor`):
                The current timestep in the diffusion chain.

        Returns:
            `torch.Tensor`:
                A scaled input sample.
        """

        if self.step_index is None:
            self._init_step_index(timestep)

        sigma = self.sigmas[self.step_index]
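
        # c_in scaling from the k-diffusion/EDM formulation: x_t = x_0 + sigma * eps has
        # variance sigma**2 + 1 for unit-variance data, so dividing by sqrt(sigma**2 + 1)
        # hands the model an input with roughly unit variance.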
        sample = sample / ((sigma**2 + 1) ** 0.5)
        self.is_scale_input_called = True
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        """
        self.num_inference_steps = num_inference_steps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://huggingface.co/papers/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[
                ::-1
            ].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = self.config.num_train_timesteps // self.num_inference_steps
            # creates integer-valued timesteps by multiplying by the ratio;
            # rounding avoids fractional timesteps (e.g. when num_inference_steps is a power of 3)
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = self.config.num_train_timesteps / self.num_inference_steps
            # creates integer-valued timesteps by multiplying by the ratio;
            # rounding avoids fractional timesteps (e.g. when num_inference_steps is a power of 3)
            timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

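        # Interpolate the training sigmas at the (possibly fractional) inference timesteps
        # and append a final sigma = 0 so the last step denoises completely.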
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        self.sigmas = torch.from_numpy(sigmas).to(device=device)

        self.timesteps = torch.from_numpy(timesteps).to(device=device)
        self._step_index = None
        self._begin_index = None
        self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
    def _init_step_index(self, timestep):
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def step(
        self,
        model_output: torch.Tensor,
        timestep: Union[float, torch.Tensor],
        sample: torch.Tensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.Tensor`):
                The direct output from learned diffusion model.
            timestep (`float` or `torch.Tensor`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            return_dict (`bool`):
                Whether or not to return a
                [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or tuple.

        Returns:
            [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or `tuple`:
                If return_dict is `True`,
                [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] is returned,
                otherwise a tuple is returned where the first element is the sample tensor.

        """

        if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `EulerAncestralDiscreteScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` as a timestep."
                ),
            )

        if not self.is_scale_input_called:
            logger.warning(
                "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
                "See `StableDiffusionPipeline` for a usage example."
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        sigma = self.sigmas[self.step_index]

        # Upcast to avoid precision issues when computing prev_sample
        sample = sample.to(torch.float32)

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            pred_original_sample = sample - sigma * model_output
        elif self.config.prediction_type == "v_prediction":
            # denoised = model_output * c_out + sample * c_skip, where
            # c_out = -sigma / sqrt(sigma**2 + 1) and c_skip = 1 / (sigma**2 + 1)
            pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        sigma_from = self.sigmas[self.step_index]
        sigma_to = self.sigmas[self.step_index + 1]
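        # Ancestral split (k-diffusion's get_ancestral_step): choose sigma_up and sigma_down
        # with sigma_down**2 + sigma_up**2 == sigma_to**2, so a deterministic Euler step down
        # to sigma_down plus fresh noise of std sigma_up lands at noise level sigma_to.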
        sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5
        sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5

        # 2. Convert to an ODE derivative
        derivative = (sample - pred_original_sample) / sigma

        dt = sigma_down - sigma

        prev_sample = sample + derivative * dt

        device = model_output.device
        noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator)

        prev_sample = prev_sample + noise * sigma_up

        # Cast sample back to model compatible dtype
        prev_sample = prev_sample.to(model_output.dtype)

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (
                prev_sample,
                pred_original_sample,
            )

        return EulerAncestralDiscreteSchedulerOutput(
            prev_sample=prev_sample, pred_original_sample=pred_original_sample
        )

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise
    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        timesteps: torch.Tensor,
    ) -> torch.Tensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index
        if self.begin_index is None:
            step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        elif self.step_index is not None:
            # add_noise is called after first denoising step (for inpainting)
            step_indices = [self.step_index] * timesteps.shape[0]
        else:
            # add_noise is called before the first denoising step to create the initial latent (img2img)
            step_indices = [self.begin_index] * timesteps.shape[0]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

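        # Forward process in the sigma parameterization: x_t = x_0 + sigma_t * eps.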
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps