# Copyright 2023 UC Berkeley Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, deprecate, randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin


@dataclass
class DDPMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.


    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
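
# Sanity-check sketch (illustrative, not part of the library API): because each
# beta is defined as 1 - alpha_bar(t2) / alpha_bar(t1), the cumulative product
# of (1 - beta) telescopes back to the alpha_bar curve at the grid points:
#
#     betas = betas_for_alpha_bar(10)
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#     # alphas_cumprod[i] ≈ alpha_bar((i + 1) / 10) / alpha_bar(0), except near
#     # the end of the schedule where the max_beta cap may bind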


class DDPMScheduler(SchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPMs) explore the connections between denoising score matching and
    Langevin dynamics sampling.

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`~SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://arxiv.org/abs/2006.11239

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`):
            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
        trained_betas (`np.ndarray`, optional):
            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
        variance_type (`str`):
            the type of variance to use when adding noise to the denoised sample. Choose from `fixed_small`,
            `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
        clip_sample (`bool`, default `True`):
            option to clip predicted sample for numerical stability.
        clip_sample_range (`float`, default `1.0`):
            the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
        prediction_type (`str`, default `epsilon`, optional):
            prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
            process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4 of
            https://imagen.research.google/video/paper.pdf)
        thresholding (`bool`, default `False`):
            whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
            Note that the thresholding method is unsuitable for latent-space diffusion models (such as
            stable-diffusion).
        dynamic_thresholding_ratio (`float`, default `0.995`):
            the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
            (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`.
        sample_max_value (`float`, default `1.0`):
            the threshold value for dynamic thresholding. Valid only when `thresholding=True`.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        clip_sample_range: float = 1.0,
        sample_max_value: float = 1.0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        elif beta_schedule == "sigmoid":
            # GeoDiff sigmoid schedule
            betas = torch.linspace(-6, 6, num_train_timesteps)
            self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
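
    # Construction sketch (illustrative): arguments captured by
    # `register_to_config` are stored on `scheduler.config`, e.g.
    #
    #     scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
    #     assert scheduler.config.beta_schedule == "squaredcos_cap_v2"
    #     assert scheduler.alphas_cumprod.shape == (scheduler.config.num_train_timesteps,)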

    @property
    def num_train_timesteps(self):
        deprecate(
            "num_train_timesteps",
            "1.0.0",
            "Accessing `num_train_timesteps` directly via scheduler.num_train_timesteps is deprecated. Please use `scheduler.config.num_train_timesteps instead`",
            standard_warn=False,
        )
        return self.config.num_train_timesteps

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.FloatTensor`): input sample
            timestep (`int`, optional): current timestep

        Returns:
            `torch.FloatTensor`: scaled input sample
        """
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Optional[Union[str, torch.device]] = None):
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.
        """

        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps

        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
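
    # Worked example (illustrative): with the default 1000 training timesteps,
    # `set_timesteps(4)` gives step_ratio = 250, so the schedule becomes
    # timesteps = [750, 500, 250, 0] (evenly spaced, descending).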

    def _get_variance(self, t, predicted_variance=None, variance_type=None):
        num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
        prev_t = t - self.config.num_train_timesteps // num_inference_steps
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one
        current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance β̃_t (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t

        # we always take the log of variance, so clamp it to ensure it's not 0
        variance = torch.clamp(variance, min=1e-20)

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
William Berman's avatar
William Berman committed
236
            variance = variance
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = torch.log(variance)
            variance = torch.exp(0.5 * variance)
        elif variance_type == "fixed_large":
            variance = current_beta_t
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = torch.log(current_beta_t)
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = torch.log(variance)
            max_log = torch.log(current_beta_t)
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
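
    # Note (editorial): for `learned_range` the returned value is a *log*-variance
    # interpolated between the posterior log-variance log(β̃_t) (min) and log(β_t)
    # (max), following "Improved Denoising Diffusion Probabilistic Models"
    # (https://arxiv.org/abs/2102.09672); `step` exponentiates it before use.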

    def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        """
        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
        photorealism as well as better image-text alignment, especially when using very large guidance weights."

        https://arxiv.org/abs/2205.11487
        """
        dtype = sample.dtype
        batch_size, channels, height, width = sample.shape

        if dtype not in (torch.float32, torch.float64):
            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half

        # Flatten sample for doing quantile calculation along each image
        sample = sample.reshape(batch_size, channels * height * width)

        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"

        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        s = torch.clamp(
            s, min=1, max=self.config.sample_max_value
        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]

        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
        sample = torch.clamp(sample, -s, s) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"

        sample = sample.reshape(batch_size, channels, height, width)
        sample = sample.to(dtype)

        return sample
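
    # Worked example (illustrative): with dynamic_thresholding_ratio=0.995 and a
    # permissive sample_max_value (assume 3.0 here), an x0 prediction whose
    # 99.5th-percentile absolute value is 2.5 is clamped to [-2.5, 2.5] and then
    # divided by 2.5, landing back in [-1, 1]. With the default
    # sample_max_value=1.0, s is clamped to exactly 1 and the method reduces to
    # plain clipping to [-1, 1].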

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[DDPMSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                current instance of sample being created by diffusion process.
            generator: random number generator.
            return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class

        Returns:
            [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`:
            [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.

        """
        t = timestep
        num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
        prev_t = timestep - self.config.num_train_timesteps // num_inference_steps

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        current_alpha_t = alpha_prod_t / alpha_prod_t_prev
        current_beta_t = 1 - current_alpha_t

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
                " `v_prediction`  for the DDPMScheduler."
            )

        # 3. Clip or threshold "predicted x_0"
        if self.config.thresholding:
            pred_original_sample = self._threshold_sample(pred_original_sample)
        elif self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t
        current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            device = model_output.device
            variance_noise = randn_tensor(
                model_output.shape, generator=generator, device=device, dtype=model_output.dtype
            )
            if self.variance_type == "fixed_small_log":
                variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise
            elif self.variance_type == "learned_range":
                variance = self._get_variance(t, predicted_variance=predicted_variance)
                variance = torch.exp(0.5 * variance) * variance_noise
            else:
                variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
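
    # A minimal denoising-loop sketch (illustrative; `model` is assumed to be a
    # trained noise-prediction network such as a `UNet2DModel`, whose output
    # exposes `.sample`):
    #
    #     scheduler = DDPMScheduler()
    #     scheduler.set_timesteps(50)
    #     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         noise_pred = model(sample, t).sample
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample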

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
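
    # Forward-process sketch (illustrative), as used to build training inputs:
    #
    #     clean = torch.randn(4, 3, 32, 32)  # stand-in for a batch of images
    #     noise = torch.randn_like(clean)
    #     t = torch.randint(0, scheduler.config.num_train_timesteps, (4,))
    #     noisy = scheduler.add_noise(clean, noise, t)
    #     # noisy == sqrt(alpha_bar_t) * clean + sqrt(1 - alpha_bar_t) * noise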

    def get_velocity(
        self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as sample
        alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
        timesteps = timesteps.to(sample.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(sample.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
        return velocity
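
    # Note (editorial): this is the v-prediction target
    # v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample from
    # "Progressive Distillation for Fast Sampling of Diffusion Models"
    # (https://arxiv.org/abs/2202.00512), used as the regression target when
    # training with prediction_type="v_prediction".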

    def __len__(self):
        return self.config.num_train_timesteps