import torch
import contextlib
import copy

from . import sd1_clip
from . import sd2_clip
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert

def load_model_weights(model, sd, verbose=False, load_state_dict_to=[]):
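    # Loads the checkpoint state dict into the diffusion model (non-strict), then
    # remaps legacy CLIP text encoder key names so the extra modules passed in
    # load_state_dict_to (the VAE / CLIP weight loaders) can pick up their weights too.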
    m, u = model.load_state_dict(sd, strict=False)

    k = list(sd.keys())
    for x in k:
        # print(x)
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    keys_to_replace = {
        "cond_stage_model.model.positional_embedding": "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight",
        "cond_stage_model.model.token_embedding.weight": "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight",
        "cond_stage_model.model.ln_final.weight": "cond_stage_model.transformer.text_model.final_layer_norm.weight",
        "cond_stage_model.model.ln_final.bias": "cond_stage_model.transformer.text_model.final_layer_norm.bias",
    }

    for x in keys_to_replace:
        if x in sd:
            sd[keys_to_replace[x]] = sd.pop(x)

    sd = utils.transformers_convert(sd, "cond_stage_model.model", "cond_stage_model.transformer.text_model", 24)

    for x in load_state_dict_to:
        x.load_state_dict(sd, strict=False)

    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.eval()
    return model

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
    "transformer_blocks.0.attn1.to_q": "transformer_blocks_0_attn1_to_q",
    "transformer_blocks.0.attn1.to_k": "transformer_blocks_0_attn1_to_k",
    "transformer_blocks.0.attn1.to_v": "transformer_blocks_0_attn1_to_v",
    "transformer_blocks.0.attn1.to_out.0": "transformer_blocks_0_attn1_to_out_0",
    "transformer_blocks.0.attn2.to_q": "transformer_blocks_0_attn2_to_q",
    "transformer_blocks.0.attn2.to_k": "transformer_blocks_0_attn2_to_k",
    "transformer_blocks.0.attn2.to_v": "transformer_blocks_0_attn2_to_v",
    "transformer_blocks.0.attn2.to_out.0": "transformer_blocks_0_attn2_to_out_0",
    "transformer_blocks.0.ff.net.0.proj": "transformer_blocks_0_ff_net_0_proj",
    "transformer_blocks.0.ff.net.2": "transformer_blocks_0_ff_net_2",
}

LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
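    # Reads a LoRA-style file and builds patch_dict keyed by the target names in
    # to_load. The tuple length encodes the patch type: 4 = lora/locon
    # (up, down, alpha, mid), 7 = loha, 8 = lokr; ModelPatcher.patch_model()
    # dispatches on it.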
    lora = utils.load_torch_file(path)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)


        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys(model, key_map={}):
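    # Builds a mapping from kohya/diffusers style LoRA key names to the matching
    # keys in this model's state dict (UNet attention/resnet blocks and the CLIP
    # text encoder layers).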
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "model.diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "model.diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "model.diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k

    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "model.diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "model.diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "model.diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map

class ModelPatcher:
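    # Holds a model plus a list of pending weight patches (e.g. LoRA deltas).
    # patch_model() applies them in place while keeping backups of the original
    # tensors; unpatch_model() restores them.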
    def __init__(self, model):
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}

    def clone(self):
        n = ModelPatcher(self.model)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        return n

    def set_model_tomesd(self, ratio):
        self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)

    def model_dtype(self):
        return self.model.diffusion_model.dtype

    def add_patches(self, patches, strength=1.0):
        p = {}
        model_sd = self.model.state_dict()
        for k in patches:
            if k in model_sd:
                p[k] = patches[k]
        self.patches += [(strength, p)]
        return p.keys()

    def patch_model(self):
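        # Applies every recorded patch directly to the model weights, backing up
        # each original tensor in self.backup first. The patch tuple length
        # selects the math: 4 = lora/locon, 8 = lokr, otherwise loha.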
        model_sd = self.model.state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]

                if len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model
    def unpatch_model(self):
        model_sd = self.model.state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
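    # Applies one LoRA file to both the UNet and the CLIP text encoder at
    # independent strengths, returning cloned patchers so the originals are left
    # untouched.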
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


class CLIP:
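    # Thin wrapper around the SD1/SD2 text encoder and its tokenizer. Keeps a
    # ModelPatcher so LoRA patches are applied only around encoding calls.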
    def __init__(self, config={}, embedding_directory=None, no_init=False):
        if no_init:
            return
        self.target_clip = config["target"]
        if "params" in config:
            params = config["params"]
        else:
            params = {}

        if self.target_clip.endswith("FrozenOpenCLIPEmbedder"):
            clip = sd2_clip.SD2ClipModel
            tokenizer = sd2_clip.SD2Tokenizer
        elif self.target_clip.endswith("FrozenCLIPEmbedder"):
            clip = sd1_clip.SD1ClipModel
            tokenizer = sd1_clip.SD1Tokenizer

        self.cond_stage_model = clip(**(params))
        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.target_clip = self.target_clip
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e

        if return_pooled:
            eos_token_index = max(range(len(tokens[0])), key=tokens[0].__getitem__)
            pooled = cond[:, eos_token_index]
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

class VAE:
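    # Wraps the first-stage AutoencoderKL. The model is moved to the torch device
    # only for the duration of encode/decode; decode() falls back to tiled
    # decoding when it runs out of memory.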
    def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        self.scale_factor = scale_factor
        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
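        # Decodes in overlapping tiles using three tile layouts (square, wide,
        # tall) and averages the results to hide seams between tiles.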
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(1. / self.scale_factor * a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def decode(self, samples_in):
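        # Batches the latents according to the currently free memory; if a regular
        # decode still runs out of memory, falls back to decode_tiled_().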
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1).to(self.device)
        samples = self.first_stage_model.encode(2. * pixel_samples - 1.).sample() * self.scale_factor
        self.first_stage_model = self.first_stage_model.cpu()
        samples = samples.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1).to(self.device)

        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        samples = utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        self.first_stage_model = self.first_stage_model.cpu()
        samples = samples.cpu()
        return samples

def broadcast_image_to(tensor, target_batch_size, batched_number):
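    # Repeats or truncates a conditioning image tensor so its batch size lines up
    # with the latent batch, taking cond/uncond batching into account.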
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

class ControlNet:
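    # Runs a ControlNet over the hint image every sampling step and returns its
    # residuals; multiple control nets can be chained via previous_controlnet.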
    def __init__(self, control_model, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
665
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
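    # Loads a ControlNet checkpoint: handles .pth-style dicts prefixed with
    # "control_model.", plain state dicts, "difference" controlnets (which need
    # the base model's weights added back), and falls back to T2I adapter loading
    # when no controlnet keys are present.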
    controlnet_data = utils.load_torch_file(ckpt_path)
    pth_key = 'control_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    pth = False
    sd2 = False
    key = 'input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
    elif key in controlnet_data:
        pass
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    context_dim = controlnet_data[key].shape[1]

    use_fp16 = False
    if model_management.should_use_fp16() and controlnet_data[key].dtype == torch.float16:
        use_fp16 = True

    if context_dim == 768:
        #SD1.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_heads=8,
                                        use_spatial_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=True,
                                        legacy=False,
                                        use_fp16=use_fp16)
    else:
        #SD2.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_head_channels=64,
                                        use_spatial_transformer=True,
                                        use_linear_in_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=True,
                                        legacy=False,
                                        use_fp16=use_fp16)
    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "model.diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        w.load_state_dict(controlnet_data, strict=False)
    else:
        control_model.load_state_dict(controlnet_data, strict=False)

    if use_fp16:
        control_model = control_model.half()

    control = ControlNet(control_model)
    return control

class T2IAdapter:
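    # Like ControlNet, but the adapter only sees the hint image, so its features
    # are computed once (control_input) and reused as 'input' residuals on every step.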
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
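    # Picks the adapter architecture from the state dict keys:
    # "body.0.in_conv.weight" -> Adapter_light, "conv_in.weight" -> full Adapter.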
    keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        model_ad = adapter.Adapter(cin=cin, channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
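    # Wraps a T2I style adapter; get_cond() turns CLIP vision output
    # (last_hidden_state) into style conditioning tokens.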
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_path, embedding_directory=None):
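    # Loads a standalone text encoder. The presence of encoder layer 22 indicates
    # the 24-layer SD2 (OpenCLIP) text model; otherwise it is treated as SD1 CLIP.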
    clip_data = utils.load_torch_file(ckpt_path)
    config = {}
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
    else:
        config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenCLIPEmbedder'
    clip = CLIP(config=config, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
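    # Loads a checkpoint with an explicit yaml config (original LDM style). VAE
    # and CLIP weights come from the same state dict via the WeightsLoader modules
    # passed to load_model_weights().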
    with open(config_path, 'r') as stream:
        config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            if "use_fp16" in model_config_params["unet_config"]["params"]:
                fp16 = model_config_params["unet_config"]["params"]["use_fp16"]

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE(scale_factor=scale_factor, config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

    model = instantiate_from_config(config["model"])
    sd = utils.load_torch_file(ckpt_path)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
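    # Loads a checkpoint without a yaml config, inferring the architecture
    # (SD1/SD2, inpainting, unCLIP, v-prediction) from the state dict itself.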
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE()
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip_config = {}
        if "cond_stage_model.model.transformer.resblocks.22.attn.out_proj.weight" in sd_keys:
            clip_config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
        else:
            clip_config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenCLIPEmbedder'
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

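    # unCLIP checkpoints bundle a CLIP vision tower and a noise augmentor; detect
    # them from their keys and rebuild the noise_aug_config accordingly.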
    clipvision_key = "embedder.model.visual.transformer.resblocks.0.attn.in_proj_weight"
    noise_aug_config = None
    if clipvision_key in sd_keys:
        size = sd[clipvision_key].shape[1]

        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd)

        noise_aug_key = "noise_augmentor.betas"
        if noise_aug_key in sd_keys:
            noise_aug_config = {}
            params = {}
            noise_schedule_config = {}
            noise_schedule_config["timesteps"] = sd[noise_aug_key].shape[0]
            noise_schedule_config["beta_schedule"] = "squaredcos_cap_v2"
            params["noise_schedule_config"] = noise_schedule_config
            noise_aug_config['target'] = "comfy.ldm.modules.encoders.noise_aug_modules.CLIPEmbeddingNoiseAugmentation"
            if size == 1280: #h
                params["timestep_dim"] = 1024
            elif size == 1024: #l
                params["timestep_dim"] = 768
            noise_aug_config['params'] = params

    sd_config = {
        "linear_start": 0.00085,
        "linear_end": 0.012,
        "num_timesteps_cond": 1,
        "log_every_t": 200,
        "timesteps": 1000,
        "first_stage_key": "jpg",
        "cond_stage_key": "txt",
        "image_size": 64,
        "channels": 4,
        "cond_stage_trainable": False,
        "monitor": "val/loss_simple_ema",
        "scale_factor": 0.18215,
        "use_ema": False,
    }

    unet_config = {
        "use_checkpoint": True,
        "image_size": 32,
        "out_channels": 4,
        "attention_resolutions": [
            4,
            2,
            1
        ],
        "num_res_blocks": 2,
        "channel_mult": [
            1,
            2,
            4,
            4
        ],
        "use_spatial_transformer": True,
        "transformer_depth": 1,
        "legacy": False
    }

    if len(sd['model.diffusion_model.input_blocks.1.1.proj_in.weight'].shape) == 2:
        unet_config['use_linear_in_transformer'] = True

    unet_config["use_fp16"] = fp16
    unet_config["model_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[0]
    unet_config["in_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
    unet_config["context_dim"] = sd['model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'].shape[1]

    sd_config["unet_config"] = {"target": "comfy.ldm.modules.diffusionmodules.openaimodel.UNetModel", "params": unet_config}
    model_config = {"target": "comfy.ldm.models.diffusion.ddpm.LatentDiffusion", "params": sd_config}

    if noise_aug_config is not None: #SD2.x unclip model
        sd_config["noise_aug_config"] = noise_aug_config
        sd_config["image_size"] = 96
        sd_config["embedding_dropout"] = 0.25
        sd_config["conditioning_key"] = 'crossattn-adm'
        model_config["target"] = "comfy.ldm.models.diffusion.ddpm.ImageEmbeddingConditionedLatentDiffusion"
    elif unet_config["in_channels"] > 4: #inpainting model
        sd_config["conditioning_key"] = "hybrid"
        sd_config["finetune_keys"] = None
        model_config["target"] = "comfy.ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
    else:
        sd_config["conditioning_key"] = "crossattn"

    if unet_config["context_dim"] == 1024:
        unet_config["num_head_channels"] = 64 #SD2.x
    else:
        unet_config["num_heads"] = 8 #SD1.x

    unclip = 'model.diffusion_model.label_emb.0.0.weight'
    if unclip in sd_keys:
        unet_config["num_classes"] = "sequential"
        unet_config["adm_in_channels"] = sd[unclip].shape[1]

    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
        out = sd[k]
        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
            sd_config["parameterization"] = 'v'

    model = instantiate_from_config(model_config)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae, clipvision)