#taken from: https://github.com/lllyasviel/ControlNet
#and modified

import torch
import torch as th
import torch.nn as nn

from ..ldm.modules.diffusionmodules.util import (
    zero_module,
    timestep_embedding,
)

from ..ldm.modules.attention import SpatialTransformer
from ..ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample
from ..ldm.util import exists
import comfy.ops

class ControlledUnetModel(UNetModel):
    #implemented in the ldm unet
    pass

class ControlNet(nn.Module):
    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        hint_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        dtype=torch.float32,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None,                     # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
        disable_self_attentions=None,
        num_attention_blocks=None,
        disable_middle_self_attn=False,
        use_linear_in_transformer=False,
        adm_in_channels=None,
        transformer_depth_middle=None,
        device=None,
        operations=comfy.ops,
    ):
        super().__init__()
        assert use_spatial_transformer, "use_spatial_transformer has to be true"
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            # from omegaconf.listconfig import ListConfig
            # if type(context_dim) == ListConfig:
            #     context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.dims = dims
        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        if isinstance(transformer_depth, int):
            transformer_depth = len(channel_mult) * [transformer_depth]
        if transformer_depth_middle is None:
            transformer_depth_middle = transformer_depth[-1]
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
                                 "as a list/tuple (per-level) with the same length as channel_mult")
            self.num_res_blocks = num_res_blocks
        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                  f"attention will still not be set.")

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = dtype
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device),
            nn.SiLU(),
            operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
        )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                print("setting up linear c_adm embedding layer")
                self.label_emb = nn.Linear(1, time_embed_dim)
            elif self.num_classes == "sequential":
                assert adm_in_channels is not None
                self.label_emb = nn.Sequential(
                    nn.Sequential(
                        operations.Linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device),
                        nn.SiLU(),
                        operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
                    )
                )
            else:
                raise ValueError(f"unsupported num_classes: {self.num_classes}")

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    operations.conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device)
                )
            ]
        )
        self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, operations=operations)])

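        # Hint encoder: three stride-2 convs take the conditioning image from
        # pixel space down to the latent resolution (8x smaller per side), and
        # the final zero-initialized conv makes the hint a no-op at init.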
        self.input_hint_block = TimestepEmbedSequential(
                    operations.conv_nd(dims, hint_channels, 16, 3, padding=1),
                    nn.SiLU(),
                    operations.conv_nd(dims, 16, 16, 3, padding=1),
                    nn.SiLU(),
                    operations.conv_nd(dims, 16, 32, 3, padding=1, stride=2),
                    nn.SiLU(),
                    operations.conv_nd(dims, 32, 32, 3, padding=1),
                    nn.SiLU(),
                    operations.conv_nd(dims, 32, 96, 3, padding=1, stride=2),
                    nn.SiLU(),
                    operations.conv_nd(dims, 96, 96, 3, padding=1),
                    nn.SiLU(),
                    operations.conv_nd(dims, 96, 256, 3, padding=1, stride=2),
                    nn.SiLU(),
                    zero_module(operations.conv_nd(dims, 256, model_channels, 3, padding=1))
        )

        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        operations=operations
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        layers.append(
                            SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint, operations=operations
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self.zero_convs.append(self.make_zero_conv(ch, operations=operations))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                            operations=operations
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch, operations=operations
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                self.zero_convs.append(self.make_zero_conv(ch, operations=operations))
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                operations=operations
            ),
            SpatialTransformer(  # always uses a self-attn
                            ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
                            disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                            use_checkpoint=use_checkpoint, operations=operations
                        ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                operations=operations
            ),
        )
        self.middle_block_out = self.make_zero_conv(ch, operations=operations)
        self._feature_size += ch

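    # Zero-initialized 1x1 convolutions: at the start of training every
    # control residual is exactly zero, so the base model's behavior is
    # unchanged until the ControlNet learns to contribute.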
    def make_zero_conv(self, channels, operations=comfy.ops):
        return TimestepEmbedSequential(zero_module(operations.conv_nd(self.dims, channels, channels, 1, padding=0)))

    def forward(self, x, hint, timesteps, context, y=None, **kwargs):
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(self.dtype)
        emb = self.time_embed(t_emb)

        guided_hint = self.input_hint_block(hint, emb, context)

        outs = []

        hs = []
        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

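        # The hint is injected once, by adding it to the output of the first
        # input block; each block's output then passes through its matching
        # zero conv so the caller gets one control residual per resolution.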
        h = x.type(self.dtype)
        for module, zero_conv in zip(self.input_blocks, self.zero_convs):
            if guided_hint is not None:
                h = module(h, emb, context)
                h += guided_hint
                guided_hint = None
            else:
                h = module(h, emb, context)
            outs.append(zero_conv(h, emb, context))

        h = self.middle_block(h, emb, context)
        outs.append(self.middle_block_out(h, emb, context))

        return outs
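

# A minimal usage sketch (not part of the original file): the hyperparameters
# below assume an SD1.5-style configuration and are illustrative only; the
# relative imports above mean this module must be loaded as part of its
# package, so the sketch is left as a comment.
#
#     control_net = ControlNet(
#         image_size=32, in_channels=4, model_channels=320, hint_channels=3,
#         num_res_blocks=2, attention_resolutions=[4, 2, 1],
#         channel_mult=(1, 2, 4, 4), num_head_channels=64,
#         use_spatial_transformer=True, transformer_depth=1, context_dim=768,
#     )
#     x = torch.randn(1, 4, 64, 64)             # noisy latent
#     hint = torch.randn(1, 3, 512, 512)        # control image in pixel space
#     t = torch.tensor([999])                   # diffusion timestep
#     context = torch.randn(1, 77, 768)         # text conditioning
#     outs = control_net(x, hint, t, context)   # one residual per block + middle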