# Copyright 2023 UC Berkeley Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin


@dataclass
class DDPMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


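# Cosine ("squaredcos_cap_v2") schedule from Nichol & Dhariwal, "Improved Denoising Diffusion Probabilistic Models"
# (https://arxiv.org/abs/2102.09672): alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and
# beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta).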
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDPMScheduler(SchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPMs) explore the connections between denoising score matching and
    Langevin dynamics sampling.

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`~SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://arxiv.org/abs/2006.11239

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`):
            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, `squaredcos_cap_v2`, or `sigmoid`.
        trained_betas (`np.ndarray`, optional):
            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
        variance_type (`str`):
            options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
            `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
        clip_sample (`bool`, default `True`):
            option to clip the predicted sample for numerical stability.
        clip_sample_range (`float`, default `1.0`):
            the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
        prediction_type (`str`, default `epsilon`, optional):
            prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
            process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4 of
            https://imagen.research.google/video/paper.pdf).
        thresholding (`bool`, default `False`):
            whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
            Note that the thresholding method is unsuitable for latent-space diffusion models (such as
            stable-diffusion).
        dynamic_thresholding_ratio (`float`, default `0.995`):
            the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
            (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`.
        sample_max_value (`float`, default `1.0`):
            the threshold value for dynamic thresholding. Valid only when `thresholding=True`.
    """

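    # Typical usage (illustrative sketch; `model` below is a hypothetical denoising network, not part of this file):
    #   scheduler = DDPMScheduler(num_train_timesteps=1000)
    #   scheduler.set_timesteps(50)
    #   sample = torch.randn(1, 3, 64, 64)
    #   for t in scheduler.timesteps:
    #       noise_pred = model(sample, t)
    #       sample = scheduler.step(noise_pred, t, sample).prev_sample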
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        clip_sample_range: float = 1.0,
        sample_max_value: float = 1.0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        elif beta_schedule == "sigmoid":
            # GeoDiff sigmoid schedule
            betas = torch.linspace(-6, 6, num_train_timesteps)
            self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.FloatTensor`): input sample
            timestep (`int`, optional): current timestep

        Returns:
            `torch.FloatTensor`: scaled input sample
        """
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.
        """

        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" a maximum of {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps

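        # Evenly space `num_inference_steps` timesteps across the training schedule and reverse them so that
        # sampling runs from high to low noise. For example, num_train_timesteps=1000 and num_inference_steps=50
        # give step_ratio=20 and timesteps [980, 960, ..., 20, 0].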
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, predicted_variance=None, variance_type=None):
        num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
        prev_t = t - self.config.num_train_timesteps // num_inference_steps
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one
        current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = torch.clamp(variance, min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "fixed_large":
            variance = current_beta_t
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = torch.log(current_beta_t)
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
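            # "learned_range": interpolate in log-space between the posterior variance (min) and beta_t (max),
            # with the model output giving the interpolation fraction; see "Improved Denoising Diffusion
            # Probabilistic Models" (https://arxiv.org/abs/2102.09672).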
            min_log = torch.log(variance)
            max_log = torch.log(self.betas[t])
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        """
        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
        photorealism as well as better image-text alignment, especially when using very large guidance weights."

        https://arxiv.org/abs/2205.11487
        """
        dtype = sample.dtype
        batch_size, channels, height, width = sample.shape

        if dtype not in (torch.float32, torch.float64):
            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half

        # Flatten sample for doing quantile calculation along each image
        sample = sample.reshape(batch_size, channels * height * width)

        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"

        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        s = torch.clamp(
            s, min=1, max=self.config.sample_max_value
        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]

        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
        sample = torch.clamp(sample, -s, s) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"

        sample = sample.reshape(batch_size, channels, height, width)
        sample = sample.to(dtype)

        return sample

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[DDPMSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                current instance of sample being created by diffusion process.
            generator: random number generator.
            return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class

        Returns:
            [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`:
            [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.
        """
        t = timestep
        num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
        prev_t = timestep - self.config.num_train_timesteps // num_inference_steps

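        # For "learned"/"learned_range" variance types, the model predicts the variance parameters in additional
        # channels; split them off from the mean-prediction part of the output.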
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        current_alpha_t = alpha_prod_t / alpha_prod_t_prev
        current_beta_t = 1 - current_alpha_t

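        # Recall the forward process: x_t = sqrt(alpha_prod_t) * x_0 + sqrt(1 - alpha_prod_t) * noise. The branches
        # below invert this relation according to what the model was trained to predict.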
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
                " `v_prediction` for the DDPMScheduler."
            )

        # 3. Clip or threshold "predicted x_0"
        if self.config.thresholding:
            pred_original_sample = self._threshold_sample(pred_original_sample)
        elif self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

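        # Posterior mean of q(x_{t-1} | x_t, x_0) (DDPM Eq. (7)):
        #   mu_t = (sqrt(alpha_prod_{t-1}) * beta_t / (1 - alpha_prod_t)) * x_0
        #        + (sqrt(alpha_t) * (1 - alpha_prod_{t-1}) / (1 - alpha_prod_t)) * x_t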
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t
        current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

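        # No noise is added at the final step (t == 0); otherwise sample the variance term, matching Algorithm 2
        # of the DDPM paper.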
        # 6. Add noise
        variance = 0
        if t > 0:
            device = model_output.device
            variance_noise = randn_tensor(
                model_output.shape, generator=generator, device=device, dtype=model_output.dtype
            )
            if self.variance_type == "fixed_small_log":
                variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise
            elif self.variance_type == "learned_range":
                variance = self._get_variance(t, predicted_variance=predicted_variance)
                variance = torch.exp(0.5 * variance) * variance_noise
            else:
                variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

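    # add_noise applies the closed-form forward (diffusion) process:
    #   x_t = sqrt(alpha_prod_t) * x_0 + sqrt(1 - alpha_prod_t) * noise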
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples

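    # get_velocity computes the v-prediction target v = sqrt(alpha_prod_t) * noise - sqrt(1 - alpha_prod_t) * sample,
    # as used for `prediction_type="v_prediction"` (see https://arxiv.org/abs/2202.00512).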
    def get_velocity(
        self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as sample
        self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
        timesteps = timesteps.to(sample.device)

        sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(sample.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
        return velocity

    def __len__(self):
        return self.config.num_train_timesteps