test_scheduler.py 43 KB
Newer Older
Patrick von Platen's avatar
Patrick von Platen committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Patrick von Platen's avatar
Patrick von Platen committed
15
import tempfile
Patrick von Platen's avatar
Patrick von Platen committed
16
import unittest
17
from typing import Dict, List, Tuple
Patrick von Platen's avatar
Patrick von Platen committed
18

Patrick von Platen's avatar
Patrick von Platen committed
19
20
21
import numpy as np
import torch

22
23
24
25
26
27
28
29
from diffusers import (
    DDIMScheduler,
    DDPMScheduler,
    IPNDMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    ScoreSdeVeScheduler,
)
30
from diffusers.utils import torch_device
Patrick von Platen's avatar
Patrick von Platen committed
31
32
33
34
35
36


# Disable TF32 matmul kernels so CUDA runs are bit-stable; the tests below
# compare against hard-coded sums/means with tight tolerances, and TF32
# rounding would presumably shift those values — keep this off.
torch.backends.cuda.matmul.allow_tf32 = False


class SchedulerCommonTest(unittest.TestCase):
    """Shared harness for diffusers scheduler tests.

    Subclasses set ``scheduler_classes`` to the scheduler types under test,
    set ``forward_default_kwargs`` to the extra (name, value) pairs every
    ``step()`` call needs, and implement ``get_scheduler_config``.

    Fix over the original: every paired ``step()`` comparison now seeds the
    global RNG with ``torch.manual_seed(0)`` before each call (previously
    only some helpers did), so schedulers that draw noise inside ``step()``
    cannot make the equality assertions flaky.
    """

    # Scheduler classes exercised by the shared tests; empty in the base class.
    scheduler_classes = ()
    # Default (name, value) pairs forwarded to every scheduler.step() call.
    forward_default_kwargs = ()

    @property
    def dummy_sample(self):
        """Random sample tensor of shape (batch, channels, height, width)."""
        batch_size = 4
        num_channels = 3
        height = 8
        width = 8

        sample = torch.rand((batch_size, num_channels, height, width))

        return sample

    @property
    def dummy_sample_deter(self):
        """Deterministic sample tensor of shape (batch, channels, height, width).

        Values are 0, 1/N, ..., (N-1)/N laid out channels-first and then
        permuted to batch-first, so the tensor is reproducible without seeding.
        """
        batch_size = 4
        num_channels = 3
        height = 8
        width = 8

        num_elems = batch_size * num_channels * height * width
        sample = torch.arange(num_elems)
        sample = sample.reshape(num_channels, height, width, batch_size)
        sample = sample / num_elems
        sample = sample.permute(3, 0, 1, 2)

        return sample

    def get_scheduler_config(self):
        """Return the scheduler constructor kwargs; implemented by subclasses."""
        raise NotImplementedError

    def dummy_model(self):
        """Return a trivial stand-in "model": residual = sample * t / (t + 1)."""

        def model(sample, t, *args):
            return sample * t / (t + 1)

        return model

    def check_over_configs(self, time_step=0, **config):
        """Assert that save/load round-tripping preserves ``step()`` output
        for a scheduler built with ``get_scheduler_config(**config)``."""
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
                new_scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # Seed before each step so schedulers that sample noise inside
            # step() produce identical draws — mirrors check_over_forward and
            # test_from_pretrained_save_pretrained (previously unseeded here).
            torch.manual_seed(0)
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            torch.manual_seed(0)
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Assert that save/load round-tripping preserves ``step()`` output
        for the given forward-pass kwargs (merged over the defaults)."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
                new_scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            torch.manual_seed(0)
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            torch.manual_seed(0)
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_pretrained_save_pretrained(self):
        """A scheduler reloaded from a saved config steps identically."""
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
                new_scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            torch.manual_seed(0)
            output = scheduler.step(residual, 1, sample, **kwargs).prev_sample
            torch.manual_seed(0)
            new_output = new_scheduler.step(residual, 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_step_shape(self):
        """``step()`` must preserve the sample's shape at different timesteps."""
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            output_0 = scheduler.step(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_scheduler_outputs_equivalence(self):
        """``step(..., return_dict=False)`` must match the dict-style output."""

        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this mask selects exactly the NaN entries.
            t[t != t] = 0
            return t

        def recursive_check(tuple_object, dict_object):
            # Builtin classes instead of the deprecated typing aliases
            # List/Tuple/Dict; isinstance semantics are identical.
            if isinstance(tuple_object, (list, tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, dict):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                    ),
                )

        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", 50)

        # NOTE(review): IPNDM is stepped at t=1 instead of t=0 — presumably
        # its first valid timestep; confirm against the scheduler implementation.
        timestep = 0
        if len(self.scheduler_classes) > 0 and self.scheduler_classes[0] == IPNDMScheduler:
            timestep = 1

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # Seed both calls so stochastic schedulers draw identical noise
            # (previously unseeded, which could make this comparison flaky).
            torch.manual_seed(0)
            outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)

            # Reset the timestep state before stepping the same scheduler again.
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            torch.manual_seed(0)
            outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)

            recursive_check(outputs_tuple, outputs_dict)

    def test_scheduler_public_api(self):
        """Every scheduler must expose the common public surface."""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            self.assertTrue(
                hasattr(scheduler, "init_noise_sigma"),
                f"{scheduler_class} does not implement a required attribute `init_noise_sigma`",
            )
            self.assertTrue(
                hasattr(scheduler, "scale_model_input"),
                f"{scheduler_class} does not implement a required class method `scale_model_input(sample, timestep)`",
            )
            self.assertTrue(
                hasattr(scheduler, "step"),
                f"{scheduler_class} does not implement a required class method `step(...)`",
            )

            sample = self.dummy_sample
            scaled_sample = scheduler.scale_model_input(sample, 0.0)
            self.assertEqual(sample.shape, scaled_sample.shape)

    def test_add_noise_device(self):
        """``add_noise`` must work with tensors on the accelerator device."""
        for scheduler_class in self.scheduler_classes:
            if scheduler_class == IPNDMScheduler:
                # Skip until #990 is addressed
                continue
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample.to(torch_device)
            scaled_sample = scheduler.scale_model_input(sample, 0.0)
            self.assertEqual(sample.shape, scaled_sample.shape)

            noise = torch.randn_like(scaled_sample).to(torch_device)
            t = torch.tensor([10]).to(torch_device)
            noised = scheduler.add_noise(scaled_sample, noise, t)
            self.assertEqual(noised.shape, scaled_sample.shape)

Patrick von Platen's avatar
Patrick von Platen committed
279
280

class DDPMSchedulerTest(SchedulerCommonTest):
    """Common scheduler tests specialised to ``DDPMScheduler``."""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM construction kwargs, with ``kwargs`` overriding."""
        defaults = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        defaults.update(**kwargs)
        return defaults

    def test_timesteps(self):
        """Round-trip equality must hold for several training lengths."""
        for num_train_timesteps in (1, 5, 100, 1000):
            self.check_over_configs(num_train_timesteps=num_train_timesteps)

    def test_betas(self):
        """Round-trip equality across a range of beta intervals."""
        starts = (0.0001, 0.001, 0.01, 0.1)
        ends = (0.002, 0.02, 0.2, 2)
        for start, end in zip(starts, ends):
            self.check_over_configs(beta_start=start, beta_end=end)

    def test_schedules(self):
        """Round-trip equality for each supported beta schedule."""
        for beta_schedule in ("linear", "squaredcos_cap_v2"):
            self.check_over_configs(beta_schedule=beta_schedule)

    def test_variance_type(self):
        """Round-trip equality for each variance type setting."""
        for variance_type in ("fixed_small", "fixed_large", "other"):
            self.check_over_configs(variance_type=variance_type)

    def test_clip_sample(self):
        """Round-trip equality with clipping both on and off."""
        for clip in (True, False):
            self.check_over_configs(clip_sample=clip)

    def test_time_indices(self):
        """Round-trip equality at early, middle, and final timesteps."""
        for timestep in (0, 500, 999):
            self.check_over_forward(time_step=timestep)

    def test_variance(self):
        """Pin known variance values for the default linear beta schedule."""
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())

        expected = ((0, 0.0), (487, 0.00979), (999, 0.02))
        for timestep, value in expected:
            assert torch.sum(torch.abs(scheduler._get_variance(timestep) - value)) < 1e-5

    def test_full_loop_no_noise(self):
        """Denoise the deterministic sample over all timesteps and pin the result."""
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # predict the noise residual, then the previous-mean sample x_{t-1}
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9070) < 1e-2
        assert abs(result_mean.item() - 0.3374) < 1e-3
Patrick von Platen's avatar
Patrick von Platen committed
359

Patrick von Platen's avatar
update  
Patrick von Platen committed
360

Patrick von Platen's avatar
Patrick von Platen committed
361
362
class DDIMSchedulerTest(SchedulerCommonTest):
    """Common scheduler tests specialised to ``DDIMScheduler``.

    Fix over the original: ``test_variance`` contained a copy-pasted duplicate
    of the ``_get_variance(0, 0)`` assertion; the duplicate is removed.
    """

    scheduler_classes = (DDIMScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Default DDIM construction kwargs, with ``kwargs`` overriding."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a deterministic 10-step DDIM loop (eta=0) and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        # steps_offset=1 shifts every entry of the 5-step schedule up by one.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        """Pin known (timestep, prev_timestep) variance values for default betas."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
Patrick von Platen's avatar
Patrick von Platen committed
471
472
473
474
475
476
477
478


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM construction kwargs, with ``kwargs`` overriding."""
        defaults = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        return {**defaults, **kwargs}

488
    def check_over_configs(self, time_step=0, **config):
        """PNDM-specific config round-trip check.

        Overrides the base-class version because PNDM steps via ``step_prk`` /
        ``step_plms`` and carries warm-up state in ``scheduler.ets``, which
        must be populated for the multistep formulas to run.
        """
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        # Fake history of past model outputs (newest-to-oldest offsets).
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            # NOTE: must happen after set_timesteps — order is significant here.
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            # Both the PRK and the PLMS branches must survive the round trip.
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
518
519
520
521
522

    def test_from_pretrained_save_pretrained(self):
        # Intentionally disabled for PNDM: the common-test version drives the
        # scheduler through `step`, while PNDM uses `step_prk`/`step_plms` with
        # warm-up state (`ets`). Save/load round-trip coverage lives in this
        # class's check_over_configs / check_over_forward overrides instead.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """PNDM-specific forward-kwargs round-trip check.

        Fix over the original: ``forward_kwargs`` were accepted but never
        merged into ``kwargs``, so callers' overrides (e.g. a custom
        ``num_inference_steps``) were silently dropped. They are now merged
        over ``forward_default_kwargs`` exactly as the base class does.
        """
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        # Fake history of past model outputs (newest-to-oldest offsets).
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            # Both the PRK and the PLMS branches must survive the round trip.
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
Patrick von Platen's avatar
Patrick von Platen committed
555

556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
    def full_loop(self, **config):
        """Run the full PNDM sampling loop (PRK warm-up, then PLMS) over the
        deterministic sample and return the final denoised tensor.

        Cleanup: the original used ``enumerate`` in both loops but never used
        the index; plain iteration is equivalent and clearer.
        """
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # Runge-Kutta warm-up steps first, ...
        for t in scheduler.prk_timesteps:
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        # ... then linear-multistep steps for the remainder of the schedule.
        for t in scheduler.plms_timesteps:
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
    def test_step_shape(self):
        """PNDM override: both ``step_prk`` and ``step_plms`` must preserve the
        sample's shape, with the ``ets`` warm-up history pre-populated."""
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                # Scheduler takes the step count per-call instead.
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

Patrick von Platen's avatar
Patrick von Platen committed
609
610
    def test_timesteps(self):
        """Config round-trips for several training-timestep counts."""
        for num_train_timesteps in (100, 1000):
            self.check_over_configs(num_train_timesteps=num_train_timesteps)
Patrick von Platen's avatar
Patrick von Platen committed
612

613
614
615
616
617
618
619
620
    def test_steps_offset(self):
        """steps_offset must round-trip through the config and shift the schedule."""
        for offset in (0, 1):
            self.check_over_configs(steps_offset=offset)

        scheduler = self.scheduler_classes[0](**self.get_scheduler_config(steps_offset=1))
        scheduler.set_timesteps(10)
        # With steps_offset=1 the combined PRK/PLMS schedule starts at 901 and ends at 1.
        expected = torch.LongTensor(
            [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
        )
        assert torch.equal(scheduler.timesteps, expected)
627

Patrick von Platen's avatar
Patrick von Platen committed
628
    def test_betas(self):
        """Config round-trips for a couple of (beta_start, beta_end) ranges."""
        for beta_start, beta_end in [(0.0001, 0.002), (0.001, 0.02)]:
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """Config round-trips for the supported beta schedules."""
        for beta_schedule in ("linear", "squaredcos_cap_v2"):
            self.check_over_configs(beta_schedule=beta_schedule)

    def test_time_indices(self):
        """Forward round-trips at a few individual timesteps."""
        for timestep in (1, 5, 10):
            self.check_over_forward(time_step=timestep)

    def test_inference_steps(self):
        """Forward round-trips for several inference-step counts."""
        # The original zipped in a time-step value that was never used; iterate
        # the step counts directly — the calls made are identical.
        for num_inference_steps in (10, 50, 100):
            self.check_over_forward(num_inference_steps=num_inference_steps)
Patrick von Platen's avatar
Patrick von Platen committed
643

644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
    def test_pow_of_3_inference_steps(self):
        """Regression test: set_timesteps() once mis-indexed alphas when the
        number of inference steps was a power of 3."""
        num_inference_steps = 27

        for cls in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler = cls(**self.get_scheduler_config())
            scheduler.set_timesteps(num_inference_steps)

            # The old bug errored on the very first step, so two steps suffice.
            for t in scheduler.prk_timesteps[:2]:
                sample = scheduler.step_prk(residual, t, sample).prev_sample

661
    def test_inference_plms_no_past_residuals(self):
        """step_plms must raise when called before any PRK history exists."""
        with self.assertRaises(ValueError):
            scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
Patrick von Platen's avatar
Patrick von Platen committed
668
669

    def test_full_loop_no_noise(self):
        """Full PRK+PLMS loop with default config reproduces the reference statistics."""
        abs_sample = torch.abs(self.full_loop())

        assert abs(torch.sum(abs_sample).item() - 198.1318) < 1e-2
        assert abs(torch.mean(abs_sample).item() - 0.2580) < 1e-3
Patrick von Platen's avatar
Patrick von Platen committed
676

677
678
679
680
681
    def test_full_loop_with_set_alpha_to_one(self):
        """Full loop with set_alpha_to_one=True reproduces the reference statistics."""
        # beta_start=0.01 makes the first alpha 0.99, so the override is observable.
        abs_sample = torch.abs(self.full_loop(set_alpha_to_one=True, beta_start=0.01))

        assert abs(torch.sum(abs_sample).item() - 230.0399) < 1e-2
        assert abs(torch.mean(abs_sample).item() - 0.2995) < 1e-3
Patrick von Platen's avatar
Patrick von Platen committed
685

686
687
688
    def test_full_loop_with_no_set_alpha_to_one(self):
        """Full loop with set_alpha_to_one=False reproduces the reference statistics."""
        # beta_start=0.01 makes the first alpha 0.99, so the flag is observable.
        abs_sample = torch.abs(self.full_loop(set_alpha_to_one=False, beta_start=0.01))

        assert abs(torch.sum(abs_sample).item() - 186.9482) < 1e-2
        assert abs(torch.mean(abs_sample).item() - 0.2434) < 1e-3
Nathan Lambert's avatar
Nathan Lambert committed
694
695


696
697
class ScoreSdeVeSchedulerTest(unittest.TestCase):
    # TODO adapt with class SchedulerCommonTest (scheduler needs Numpy Integration)
    scheduler_classes = (ScoreSdeVeScheduler,)
    forward_default_kwargs = ()

    @property
    def dummy_sample(self):
        """A random sample batch of shape (4, 3, 8, 8)."""
        batch_size, num_channels, height, width = 4, 3, 8, 8
        return torch.rand((batch_size, num_channels, height, width))

    @property
    def dummy_sample_deter(self):
        """A deterministic sample batch of shape (4, 3, 8, 8) with values in [0, 1)."""
        batch_size, num_channels, height, width = 4, 3, 8, 8
        num_elems = batch_size * num_channels * height * width

        values = torch.arange(num_elems)
        values = values.reshape(num_channels, height, width, batch_size)
        values = values / num_elems
        return values.permute(3, 0, 1, 2)

    def dummy_model(self):
        """Return a trivial stand-in 'model' whose output scales with the timestep."""

        def model(sample, t, *args):
            return sample * t / (t + 1)

        return model

    def get_scheduler_config(self, **kwargs):
        """Default ScoreSdeVe config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 2000,
            "snr": 0.15,
            "sigma_min": 0.01,
            "sigma_max": 1348,
            "sampling_eps": 1e-5,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Saving/reloading the config must not change step_pred/step_correct output."""
        kwargs = dict(self.forward_default_kwargs)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler = scheduler_class(**self.get_scheduler_config(**config))

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            # Re-seed the generator for each call so the stochastic steps match.
            output = scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample
            new_output = new_scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
            new_output = new_scheduler.step_correct(
                residual, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Forward kwargs must round-trip through save/load without changing outputs."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler = scheduler_class(**self.get_scheduler_config())

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            # Re-seed the generator for each call so the stochastic steps match.
            output = scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample
            new_output = new_scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
            new_output = new_scheduler.step_correct(
                residual, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical"

    def test_timesteps(self):
        for num_train_timesteps in (10, 100, 1000):
            self.check_over_configs(num_train_timesteps=num_train_timesteps)

    def test_sigmas(self):
        for sigma_min, sigma_max in [(0.0001, 1), (0.001, 100), (0.01, 1000)]:
            self.check_over_configs(sigma_min=sigma_min, sigma_max=sigma_max)

    def test_time_indices(self):
        for t in (0.1, 0.5, 0.75):
            self.check_over_forward(time_step=t)

    def test_full_loop_no_noise(self):
        """Seeded predictor-corrector loop reproduces the reference statistics."""
        kwargs = dict(self.forward_default_kwargs)

        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())

        num_inference_steps = 3
        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_sigmas(num_inference_steps)
        scheduler.set_timesteps(num_inference_steps)
        generator = torch.manual_seed(0)

        for i, t in enumerate(scheduler.timesteps):
            sigma_t = scheduler.sigmas[i]

            # Corrector passes for this timestep.
            for _ in range(scheduler.config.correct_steps):
                with torch.no_grad():
                    model_output = model(sample, sigma_t)
                sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample

            # Predictor pass.
            with torch.no_grad():
                model_output = model(sample, sigma_t)

            output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs)
            sample, _ = output.prev_sample, output.prev_sample_mean

        abs_sample = torch.abs(sample)

        assert np.isclose(torch.sum(abs_sample).item(), 14372758528.0)
        assert np.isclose(torch.mean(abs_sample).item(), 18714530.0)

    def test_step_shape(self):
        """step_pred must preserve the sample's shape."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler = scheduler_class(**self.get_scheduler_config())

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
            output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898


class LMSDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (LMSDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default LMS-discrete config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "trained_betas": None,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for num_train_timesteps in (10, 50, 100, 1000):
            self.check_over_configs(num_train_timesteps=num_train_timesteps)

    def test_betas(self):
        for beta_start, beta_end in [(0.00001, 0.0002), (0.0001, 0.002), (0.001, 0.02)]:
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for beta_schedule in ("linear", "scaled_linear"):
            self.check_over_configs(beta_schedule=beta_schedule)

    def test_time_indices(self):
        for timestep in (0, 500, 800):
            self.check_over_forward(time_step=timestep)

    def test_full_loop_no_noise(self):
        """Full denoising loop reproduces the reference statistics."""
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        # Scale the deterministic sample by the scheduler's initial noise sigma.
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            scaled = scheduler.scale_model_input(sample, t)
            model_output = model(scaled, t)
            sample = scheduler.step(model_output, t, scaled).prev_sample

        abs_sample = torch.abs(sample)

        assert abs(torch.sum(abs_sample).item() - 1006.388) < 1e-2
        assert abs(torch.mean(abs_sample).item() - 1.31) < 1e-3
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_pretrained_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10