# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class UNet2DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Hidden states output. Output of last layer of model.
    """

    sample: torch.FloatTensor


class UNet2DModel(ModelMixin, ConfigMixin):
    r"""
    UNet2DModel is a 2D UNet model that takes a noisy sample and a timestep as input and returns a sample-shaped
    output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) -
            1)`.
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for Fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
            Tuple of downsample block types.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`):
            The mid block type. Choose from `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
        norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for the normalization.
        norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for the normalization.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for resnet blocks, see [`~models.resnet.ResnetBlock2D`]. Choose from `default` or `scale_shift`.
        add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention to the mid block.
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use, which is ultimately summed with the time embeddings. Choose from
            `None`, `"timestep"`, or `"identity"`.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix, projected to `time_embed_dim` when performing class
            conditioning with `class_embed_type` equal to `None`.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[Union[int, Tuple[int, int]]] = None,
        in_channels: int = 3,
        out_channels: int = 3,
        center_input_sample: bool = False,
        time_embedding_type: str = "positional",
        freq_shift: int = 0,
        flip_sin_to_cos: bool = True,
        down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
        up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
        block_out_channels: Tuple[int] = (224, 448, 672, 896),
        layers_per_block: int = 2,
        mid_block_scale_factor: float = 1,
        downsample_padding: int = 1,
        act_fn: str = "silu",
        attention_head_dim: Optional[int] = 8,
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
        resnet_time_scale_shift: str = "default",
        add_attention: bool = True,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
    ):
        super().__init__()

        self.sample_size = sample_size
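        # by convention, the time embedding is four times as wide as the first block's channels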
        time_embed_dim = block_out_channels[0] * 4

        # Check inputs
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
            )

        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        # input
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]
        else:
            # guard against a later `UnboundLocalError` on `timestep_input_dim`
            raise ValueError(f"`time_embedding_type` must be 'fourier' or 'positional', got {time_embedding_type}.")

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
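        # `time_proj` turns scalar timesteps into a feature vector; `time_embedding` is a
        # small MLP that lifts it to `time_embed_dim`, which the resnet blocks consume as `temb`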

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
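            # nn.Identity ignores its constructor arguments; class labels are expected to
            # already be `time_embed_dim`-dimensional in this case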
        else:
            self.class_embedding = None

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
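            # the final down block keeps the spatial resolution (no downsampler is added)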

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
                downsample_padding=downsample_padding,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

        # mid
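        # (bottleneck block at the lowest resolution; its self-attention can be disabled via `add_attention`)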
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1],
            resnet_groups=norm_num_groups,
            add_attention=add_attention,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
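            # the skip tensors arriving from the down path carry the channel count of the
            # next-deeper block, which is what `input_channel` (below) looks up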
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
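        # fall back to a small group count when `norm_num_groups` is None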
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        class_labels: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[UNet2DOutput, Tuple]:
        r"""
        Args:
            sample (`torch.FloatTensor`): `(batch, channel, height, width)` noisy inputs tensor.
            timestep (`torch.Tensor`, `float`, or `int`): `(batch)` timesteps.
            class_labels (`torch.FloatTensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.

        Returns:
            [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True,
            otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
        """
        # 0. center input if necessary
        if self.config.center_input_sample:
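            # rescale inputs from [0, 1] to [-1, 1]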
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        t_emb = self.time_proj(timesteps)

        # `time_proj` contains no weights and always returns f32 tensors,
        # but `time_embedding` might actually be running in fp16, so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when doing class conditioning")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        # 2. pre-process
        skip_sample = sample
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
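        # residuals are stacked here and popped, last-in first-out, by the up blocks in step 5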
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "skip_conv"):
                sample, res_samples, skip_sample = downsample_block(
                    hidden_states=sample, temb=emb, skip_sample=skip_sample
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, emb)

        # 5. up
        skip_sample = None
        for upsample_block in self.up_blocks:
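            # pop the residuals that belong to this up block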
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            if hasattr(upsample_block, "skip_conv"):
                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
            else:
                sample = upsample_block(sample, res_samples, emb)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if skip_sample is not None:
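            # only the Skip* block variants (e.g. `SkipUpBlock2D`) produce a skip sample;
            # it stays `None` for the standard blocks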
            sample += skip_sample

        if self.config.time_embedding_type == "fourier":
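            # score-based (Fourier) variants rescale the output by the sigma-like timestep values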
            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
            sample = sample / timesteps

        if not return_dict:
            return (sample,)

        return UNet2DOutput(sample=sample)
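

# A minimal usage sketch (illustrative, not part of the module): build a small
# `UNet2DModel` and run one forward pass. The sizes below are arbitrary examples.
#
#     import torch
#     from diffusers import UNet2DModel
#
#     model = UNet2DModel(
#         sample_size=32,
#         in_channels=3,
#         out_channels=3,
#         block_out_channels=(32, 64, 64, 64),
#     )
#     noisy = torch.randn(1, 3, 32, 32)  # (batch, channel, height, width)
#     out = model(noisy, timestep=10).sample  # same shape as the input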