# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

import math
from typing import Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative product of
    (1-beta) over time from t = 0 to t = 1.

    Here alpha_bar is the cosine schedule defined below: it takes an argument t in [0, 1] and returns the cumulative
    product of (1-beta) up to that point of the diffusion process.

    :param num_diffusion_timesteps: the number of betas to produce.
    :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities.
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas, dtype=np.float32)
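
# Illustrative sketch (comments only, so nothing runs on import): the schedule returned
# above telescopes, so the cumulative product of (1 - beta) recovered from it tracks the
# cosine alpha_bar defined inside the function, up to the max_beta clipping of the last
# few betas:
#
#     betas = betas_for_alpha_bar(1000)
#     alphas_cumprod = np.cumprod(1.0 - betas)  # ≈ alpha_bar((i + 1) / 1000) / alpha_bar(0)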


class PNDMScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1000,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule="linear",
        tensor_format="np",
    ):

        if beta_schedule == "linear":
            self.betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = np.cumprod(self.alphas, axis=0)

        self.one = np.array(1.0)

        self.set_format(tensor_format=tensor_format)

        # For now we only support F-PNDM, i.e. the Runge-Kutta method.
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formulas (9), (12), (13) and Algorithm 2.
        self.pndm_order = 4

        # running values
        self.cur_model_output = 0
        self.cur_sample = None
        self.ets = []
        self.prk_time_steps = {}
        self.time_steps = {}
        self.set_prk_mode()

    def get_prk_time_steps(self, num_inference_steps):
        if num_inference_steps in self.prk_time_steps:
            return self.prk_time_steps[num_inference_steps]

        inference_step_times = list(
            range(0, self.config.num_train_timesteps, self.config.num_train_timesteps // num_inference_steps)
        )

        prk_time_steps = np.array(inference_step_times[-self.pndm_order :]).repeat(2) + np.tile(
            np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order
        )
        self.prk_time_steps[num_inference_steps] = list(reversed(prk_time_steps[:-1].repeat(2)[1:-1]))
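        # For example, with num_train_timesteps=1000 and num_inference_steps=50 this yields
        # [980, 970, 970, 960, 960, 950, 950, 940, 940, 930, 930, 920]: three Runge-Kutta warm-up
        # steps of one inference stride (20) each, with the half-stride midpoints duplicated as
        # required by the RK sub-steps in `step_prk`.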

        return self.prk_time_steps[num_inference_steps]

    def get_time_steps(self, num_inference_steps):
        if num_inference_steps in self.time_steps:
            return self.time_steps[num_inference_steps]

        inference_step_times = list(
            range(0, self.config.num_train_timesteps, self.config.num_train_timesteps // num_inference_steps)
        )
        self.time_steps[num_inference_steps] = list(reversed(inference_step_times[:-3]))
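        # For example, with num_train_timesteps=1000 and num_inference_steps=50 this yields
        # [920, 900, ..., 20, 0]; the three largest inference timesteps (980, 960, 940) are
        # already covered by the Runge-Kutta warm-up in `step_prk`, hence the `[:-3]` above.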

        return self.time_steps[num_inference_steps]

    def set_prk_mode(self):
        self.mode = "prk"

    def set_plms_mode(self):
        self.mode = "plms"

    def step(self, *args, **kwargs):
        if self.mode == "prk":
            return self.step_prk(*args, **kwargs)
        if self.mode == "plms":
            return self.step_plms(*args, **kwargs)

        raise ValueError(f"mode {self.mode} does not exist.")
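
    # Illustrative calling pattern (a sketch only; the reference loop is the PNDM pipeline
    # linked from the error message in `step_plms` below). The scheduler starts in "prk"
    # mode: 12 Runge-Kutta warm-up iterations fill `self.ets`, after which "plms" mode
    # reuses those stored outputs. `model_output` stands for the denoising model's
    # prediction for the current sample, recomputed at every iteration:
    #
    #     scheduler = PNDMScheduler(tensor_format="np")
    #     num_inference_steps = 50
    #     for t in range(len(scheduler.get_prk_time_steps(num_inference_steps))):  # 12 steps
    #         sample = scheduler.step(model_output, t, sample, num_inference_steps)["prev_sample"]
    #     scheduler.set_plms_mode()
    #     for t in range(len(scheduler.get_time_steps(num_inference_steps))):
    #         sample = scheduler.step(model_output, t, sample, num_inference_steps)["prev_sample"]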

    def step_prk(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: int,
        sample: Union[torch.FloatTensor, np.ndarray],
        num_inference_steps,
    ):
        """
        Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the
        solution to the differential equation.
        """
        t = timestep
        prk_time_steps = self.get_prk_time_steps(num_inference_steps)

        t_orig = prk_time_steps[t // 4 * 4]
        t_orig_prev = prk_time_steps[min(t + 1, len(prk_time_steps) - 1)]

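        # Accumulate the four model evaluations of one RK step with the classical RK4 weights
        # (1/6, 1/3, 1/3, 1/6); only on the fourth sub-step is the fully combined output used
        # for the update below.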
        if t % 4 == 0:
            self.cur_model_output += 1 / 6 * model_output
            self.ets.append(model_output)
            self.cur_sample = sample
        elif (t - 1) % 4 == 0:
            self.cur_model_output += 1 / 3 * model_output
        elif (t - 2) % 4 == 0:
            self.cur_model_output += 1 / 3 * model_output
        elif (t - 3) % 4 == 0:
            model_output = self.cur_model_output + 1 / 6 * model_output
            self.cur_model_output = 0

        # cur_sample should not be `None`
        cur_sample = self.cur_sample if self.cur_sample is not None else sample

        return {"prev_sample": self.get_prev_sample(cur_sample, t_orig, t_orig_prev, model_output)}

    def step_plms(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: int,
        sample: Union[torch.FloatTensor, np.ndarray],
        num_inference_steps,
    ):
        """
        Step function propagating the sample with the linear multi-step method. This requires only one forward pass per
        step; previously stored model outputs are reused to approximate the solution.
        """
        t = timestep
        if len(self.ets) < 3:
            raise ValueError(
                f"{self.__class__} can only be run AFTER the scheduler has been run "
                "in 'prk' mode for at least 12 iterations. "
                "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py "
                "for more information."
            )

        timesteps = self.get_time_steps(num_inference_steps)

        t_orig = timesteps[t]
        t_orig_prev = timesteps[min(t + 1, len(timesteps) - 1)]
        self.ets.append(model_output)

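        # Combine the four most recent model outputs with the classical fourth-order
        # Adams-Bashforth coefficients (55, -59, 37, -9) / 24 (the linear multi-step part of PNDM).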
        model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        return {"prev_sample": self.get_prev_sample(sample, t_orig, t_orig_prev, model_output)}

    def get_prev_sample(self, sample, t_orig, t_orig_prev, model_output):
        # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf
        # this function computes x_(t−δ) using the formula of (9)
        # Note that x_t needs to be added to both sides of the equation

        # Notation (<variable name> -> <name in paper>)
        # alpha_prod_t -> α_t
        # alpha_prod_t_prev -> α_(t−δ)
        # beta_prod_t -> (1 - α_t)
        # beta_prod_t_prev -> (1 - α_(t−δ))
        # sample -> x_t
        # model_output -> e_θ(x_t, t)
        # prev_sample -> x_(t−δ)
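        #
        # Putting the pieces below together, the update implemented here is formula (9)
        # rearranged for x_(t−δ):
        #     x_(t−δ) = sqrt(α_(t−δ) / α_t) * x_t
        #               - (α_(t−δ) - α_t) * e_θ(x_t, t)
        #                 / (α_t * sqrt(1 - α_(t−δ)) + sqrt(α_t * (1 - α_t) * α_(t−δ)))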
        alpha_prod_t = self.alphas_cumprod[t_orig + 1]
        alpha_prod_t_prev = self.alphas_cumprod[t_orig_prev + 1]
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # corresponds to (α_(t−δ) - α_t) divided by
        # the denominator of x_t in formula (9), plus 1
        # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqrt(α_t))) + 1 =
        # sqrt(α_(t−δ)) / sqrt(α_t)
        sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)

        # corresponds to denominator of e_θ(x_t, t) in formula (9)
        model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
            alpha_prod_t * beta_prod_t * alpha_prod_t_prev
        ) ** (0.5)

        # full formula (9)
        prev_sample = (
            sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
        )

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps