# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
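"""Unit tests for diffusers schedulers: DDPM, DDIM, PNDM, ScoreSDE-VE, LMS discrete, and IPNDM."""
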
import tempfile
import unittest
from typing import Dict, List, Tuple

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    DDPMScheduler,
    IPNDMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    ScoreSdeVeScheduler,
)
from diffusers.utils import torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class SchedulerCommonTest(unittest.TestCase):
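    """Shared scheduler checks; subclasses set `scheduler_classes` and `forward_default_kwargs`."""
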
    scheduler_classes = ()
    forward_default_kwargs = ()

    @property
    def dummy_sample(self):
        batch_size = 4
        num_channels = 3
        height = 8
        width = 8

        sample = torch.rand((batch_size, num_channels, height, width))

        return sample

    @property
    def dummy_sample_deter(self):
        batch_size = 4
        num_channels = 3
        height = 8
        width = 8

        num_elems = batch_size * num_channels * height * width
        sample = torch.arange(num_elems)
        sample = sample.reshape(num_channels, height, width, batch_size)
        sample = sample / num_elems
        sample = sample.permute(3, 0, 1, 2)

        return sample

    def get_scheduler_config(self):
        raise NotImplementedError

    def dummy_model(self):
        def model(sample, t, *args):
            return sample * t / (t + 1)

        return model

    def check_over_configs(self, time_step=0, **config):
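        # Round-trip the scheduler through save_config/from_config and verify that
        # the reloaded scheduler's step() output matches the original.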
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
                new_scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
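        # Same save/load round-trip as check_over_configs, but varying the kwargs
        # passed to step() instead of the scheduler config.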
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
                new_scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            torch.manual_seed(0)
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            torch.manual_seed(0)
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_pretrained_save_pretrained(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
                new_scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            torch.manual_seed(0)
            output = scheduler.step(residual, 1, sample, **kwargs).prev_sample
            torch.manual_seed(0)
            new_output = new_scheduler.step(residual, 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_step_shape(self):
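        # step() must preserve the sample shape for consecutive timesteps.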
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            output_0 = scheduler.step(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_scheduler_outputs_equivalence(self):
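        # The dict-style output of step() and its return_dict=False tuple variant
        # must carry the same tensors, element for element.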
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, Dict):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                    ),
                )

        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", 50)

        timestep = 0
        if len(self.scheduler_classes) > 0 and self.scheduler_classes[0] == IPNDMScheduler:
            timestep = 1

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)

            recursive_check(outputs_tuple, outputs_dict)

    def test_scheduler_public_api(self):
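        # Minimal public API every scheduler is expected to expose.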
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            self.assertTrue(
                hasattr(scheduler, "init_noise_sigma"),
                f"{scheduler_class} does not implement a required attribute `init_noise_sigma`",
            )
            self.assertTrue(
                hasattr(scheduler, "scale_model_input"),
                f"{scheduler_class} does not implement a required class method `scale_model_input(sample, timestep)`",
            )
            self.assertTrue(
                hasattr(scheduler, "step"),
                f"{scheduler_class} does not implement a required class method `step(...)`",
            )

            sample = self.dummy_sample
            scaled_sample = scheduler.scale_model_input(sample, 0.0)
            self.assertEqual(sample.shape, scaled_sample.shape)

    def test_add_noise_device(self):
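        # scale_model_input and add_noise should accept tensors on torch_device
        # and preserve the sample shape.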
        for scheduler_class in self.scheduler_classes:
            if scheduler_class == IPNDMScheduler:
                # Skip until #990 is addressed
                continue
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(100)

            sample = self.dummy_sample.to(torch_device)
            scaled_sample = scheduler.scale_model_input(sample, 0.0)
            self.assertEqual(sample.shape, scaled_sample.shape)

            noise = torch.randn_like(scaled_sample).to(torch_device)
            t = scheduler.timesteps[5][None]
            noised = scheduler.add_noise(scaled_sample, noise, t)
            self.assertEqual(noised.shape, scaled_sample.shape)


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9070) < 1e-2
        assert abs(result_mean.item() - 0.3374) < 1e-3


class DDIMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
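        # Deterministic 10-step DDIM sampling loop (eta=0.0) used by the
        # full-loop tests below.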
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_pretrained_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
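        # PNDM sampling is two-phase: Runge-Kutta (PRK) warm-up steps first,
        # then linear multistep (PLMS) steps.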
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for num_inference_steps in [10, 50, 100]:
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
Patrick von Platen's avatar
Patrick von Platen committed
692

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3


class ScoreSdeVeSchedulerTest(unittest.TestCase):
    # TODO: adapt to SchedulerCommonTest (this scheduler needs NumPy integration)
    scheduler_classes = (ScoreSdeVeScheduler,)
    forward_default_kwargs = ()

    @property
    def dummy_sample(self):
        batch_size = 4
        num_channels = 3
        height = 8
        width = 8

        sample = torch.rand((batch_size, num_channels, height, width))

        return sample

    @property
    def dummy_sample_deter(self):
        batch_size = 4
        num_channels = 3
        height = 8
        width = 8

        num_elems = batch_size * num_channels * height * width
        sample = torch.arange(num_elems)
        sample = sample.reshape(num_channels, height, width, batch_size)
        sample = sample / num_elems
        sample = sample.permute(3, 0, 1, 2)

        return sample

    def dummy_model(self):
        def model(sample, t, *args):
            return sample * t / (t + 1)

        return model

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 2000,
            "snr": 0.15,
            "sigma_min": 0.01,
            "sigma_max": 1348,
            "sampling_eps": 1e-5,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            output = scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample
            new_output = new_scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
            new_output = new_scheduler.step_correct(
                residual, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler corrections are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)

            output = scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample
            new_output = new_scheduler.step_pred(
                residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
            new_output = new_scheduler.step_correct(
                residual, sample, generator=torch.manual_seed(0), **kwargs
            ).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler corrections are not identical"

    def test_timesteps(self):
        for timesteps in [10, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_sigmas(self):
        for sigma_min, sigma_max in zip([0.0001, 0.001, 0.01], [1, 100, 1000]):
            self.check_over_configs(sigma_min=sigma_min, sigma_max=sigma_max)

    def test_time_indices(self):
        for t in [0.1, 0.5, 0.75]:
            self.check_over_forward(time_step=t)

    def test_full_loop_no_noise(self):
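        # Predictor-corrector loop: `correct_steps` step_correct() updates per
        # step_pred() predictor update.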
        kwargs = dict(self.forward_default_kwargs)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 3

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_sigmas(num_inference_steps)
        scheduler.set_timesteps(num_inference_steps)
        generator = torch.manual_seed(0)

        for i, t in enumerate(scheduler.timesteps):
            sigma_t = scheduler.sigmas[i]

            for _ in range(scheduler.config.correct_steps):
                with torch.no_grad():
                    model_output = model(sample, sigma_t)
                sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample

            with torch.no_grad():
                model_output = model(sample, sigma_t)

            output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs)
            sample, _ = output.prev_sample, output.prev_sample_mean

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert np.isclose(result_sum.item(), 14372758528.0)
        assert np.isclose(result_mean.item(), 18714530.0)

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
            output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)


class LMSDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (LMSDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "trained_betas": None,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_time_indices(self):
        for t in [0, 500, 800]:
            self.check_over_forward(time_step=t)

    def test_full_loop_no_noise(self):
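        # LMS expects the initial sample scaled by init_noise_sigma and each model
        # input scaled via scale_model_input.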
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_pretrained_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_config(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
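        # Note: the timestep loop below runs twice; the expected value in
        # test_full_loop_no_noise assumes both passes.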
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for num_inference_steps in [10, 50, 100]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10