pipeline_ddim.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import warnings

import torch

from tqdm.auto import tqdm

from ...pipeline_utils import DiffusionPipeline


class DDIMPipeline(DiffusionPipeline):
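    """Unconditional image generation pipeline based on DDIM sampling
    ("Denoising Diffusion Implicit Models", Song et al., 2020, https://arxiv.org/abs/2010.02502).

    Example (a minimal sketch; assumes a Hub checkpoint whose ``unet`` and ``scheduler``
    are compatible with DDIM sampling; the checkpoint id below is only illustrative):

        pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # illustrative checkpoint id
        images = pipe(batch_size=4, eta=0.0, num_inference_steps=50)["sample"]
        images[0].save("ddim_sample.png")
    """
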
    def __init__(self, unet, scheduler):
        super().__init__()
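        # make sure the scheduler returns PyTorch tensors ("pt" format) rather than NumPy arrays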
        scheduler = scheduler.set_format("pt")
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, output_type="pil", **kwargs):
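        """Sample a batch of images by running the reverse DDIM process from random noise.

        Args:
            batch_size (int): number of images to generate per call.
            generator (torch.Generator, optional): RNG used to draw the initial Gaussian noise.
            eta (float): weight of the injected noise in the DDIM update; 0.0 is fully
                deterministic DDIM sampling, larger values add stochasticity.
            num_inference_steps (int): number of denoising steps.
            output_type (str): "pil" returns a list of PIL images, anything else a NumPy array.

        Returns:
            dict: ``{"sample": images}``.
        """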

        if "torch_device" in kwargs:
            device = kwargs.pop("torch_device")
            warnings.warn(
                "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
                " Consider using `pipe.to(torch_device)` instead."
            )

            # Set device as before (to be removed in 0.3.0)
            if device is None:
                device = "cuda" if torch.cuda.is_available() else "cpu"
            self.to(device)

        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502) and should be in [0, 1]

        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set the scheduler's timestep schedule for `num_inference_steps` denoising steps
        self.scheduler.set_timesteps(num_inference_steps)

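        # denoising loop: iteratively predict the noise and step x_t -> x_t-1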
        for t in tqdm(self.scheduler.timesteps):
            # 1. predict the noise residual (model_output)
            model_output = self.unet(image, t)["sample"]

            # 2. compute the previous noisy sample x_t -> x_t-1, adding variance scaled by eta
            # (eta = 0.0 yields deterministic DDIM sampling)
            image = self.scheduler.step(model_output, t, image, eta)["prev_sample"]

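        # post-process: map images from [-1, 1] to [0, 1], move to CPU, convert to channel-last NumPy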
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        return {"sample": image}