pipeline_ddpm.py

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import warnings

import torch

from tqdm.auto import tqdm

from ...pipeline_utils import DiffusionPipeline


class DDPMPipeline(DiffusionPipeline):
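    """Unconditional image generation pipeline for Denoising Diffusion Probabilistic Models (DDPM).

    Wraps a `unet` noise-prediction model and a DDPM noise `scheduler`; calling the
    pipeline runs the full reverse-diffusion loop starting from Gaussian noise.
    """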
    def __init__(self, unet, scheduler):
        super().__init__()
        scheduler = scheduler.set_format("pt")
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, output_type="pil", **kwargs):
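        """Generate a batch of images by running the reverse diffusion process.

        Args:
            batch_size: number of images to sample in parallel.
            generator: optional `torch.Generator` used to draw the initial noise.
            output_type: "pil" returns PIL images, any other value returns a numpy array.

        Returns:
            A dict holding the generated images under the "sample" key.
        """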
        if "torch_device" in kwargs:
            device = kwargs.pop("torch_device")
            warnings.warn(
                "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
                " Consider using `pipe.to(torch_device)` instead."
            )

            # Set device as before (to be removed in 0.3.0)
            if device is None:
                device = "cuda" if torch.cuda.is_available() else "cpu"
            self.to(device)

        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values for the full 1000-step DDPM reverse process
        self.scheduler.set_timesteps(1000)

        for t in tqdm(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t)["sample"]

            # 2. compute previous image: x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image)["prev_sample"]

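        # scale images from [-1, 1] to [0, 1] and convert to channel-last numpy arrays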
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        return {"sample": image}