import torch
import contextlib
import copy

import sd1_clip
import sd2_clip
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen

def load_model_weights(model, sd, verbose=False, load_state_dict_to=[]):
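    """Load a checkpoint state dict into the diffusion model.

    Legacy CLIP keys ("cond_stage_model.model.*", open_clip layout) are renamed
    to the transformers layout ("cond_stage_model.transformer.text_model.*") so
    the same state dict can also be loaded into the extra modules passed in
    load_state_dict_to (e.g. the VAE and CLIP weight loaders).
    """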
    m, u = model.load_state_dict(sd, strict=False)

    k = list(sd.keys())
    for x in k:
        # print(x)
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    keys_to_replace = {
        "cond_stage_model.model.positional_embedding": "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight",
        "cond_stage_model.model.token_embedding.weight": "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight",
        "cond_stage_model.model.ln_final.weight": "cond_stage_model.transformer.text_model.final_layer_norm.weight",
        "cond_stage_model.model.ln_final.bias": "cond_stage_model.transformer.text_model.final_layer_norm.bias",
    }

    for x in keys_to_replace:
        if x in sd:
            sd[keys_to_replace[x]] = sd.pop(x)

    sd = utils.transformers_convert(sd, "cond_stage_model.model", "cond_stage_model.transformer.text_model", 24)

    for x in load_state_dict_to:
        x.load_state_dict(sd, strict=False)

    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.eval()
    return model

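#The maps below translate submodule names inside the model to the underscore
#separated fragments that LoRA files use to name their weight keys.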
LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
    "transformer_blocks.0.attn1.to_q": "transformer_blocks_0_attn1_to_q",
    "transformer_blocks.0.attn1.to_k": "transformer_blocks_0_attn1_to_k",
    "transformer_blocks.0.attn1.to_v": "transformer_blocks_0_attn1_to_v",
    "transformer_blocks.0.attn1.to_out.0": "transformer_blocks_0_attn1_to_out_0",
    "transformer_blocks.0.attn2.to_q": "transformer_blocks_0_attn2_to_q",
    "transformer_blocks.0.attn2.to_k": "transformer_blocks_0_attn2_to_k",
    "transformer_blocks.0.attn2.to_v": "transformer_blocks_0_attn2_to_v",
    "transformer_blocks.0.attn2.to_out.0": "transformer_blocks_0_attn2_to_out_0",
    "transformer_blocks.0.ff.net.0.proj": "transformer_blocks_0_ff_net_0_proj",
    "transformer_blocks.0.ff.net.2": "transformer_blocks_0_ff_net_2",
}

LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
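    """Read a LoRA file and build a dict of weight patches.

    to_load maps LoRA key prefixes to model state dict keys. For each prefix
    this collects regular LoRA/LoCon tensors (lora_up/lora_down plus optional
    lora_mid), LoHa tensors (hada_*) and LoKr tensors (lokr_*); keys that were
    not consumed are reported as not loaded.
    """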
    lora = utils.load_torch_file(path)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)


        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys(model, key_map={}):
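    """Map LoRA key names to the state dict keys of this model.

    Covers the UNet attention blocks, the CLIP text encoder layers and the
    LoCon resnet/downsampler/upsampler convolutions.
    """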
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "model.diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "model.diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "model.diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k


    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "model.diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "model.diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "model.diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map

class ModelPatcher:
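    """Wraps a model and applies weight patches (e.g. LoRA deltas) on demand.

    patch_model() adds the accumulated patches to the weights while keeping a
    backup of the originals, and unpatch_model() restores them. model_options
    carries sampler and transformer options such as tomesd and attention
    patches.
    """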
    def __init__(self, model):
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}

    def clone(self):
        n = ModelPatcher(self.model)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        return n

    def set_model_tomesd(self, ratio):
        self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        self.model_options["sampler_cfg_function"] = sampler_cfg_function


    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)

    def model_dtype(self):
        return self.model.diffusion_model.dtype

    def add_patches(self, patches, strength=1.0):
        p = {}
        model_sd = self.model.state_dict()
        for k in patches:
            if k in model_sd:
                p[k] = patches[k]
        self.patches += [(strength, p)]
        return p.keys()

    def patch_model(self):
        model_sd = self.model.state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]

                if len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model
    def unpatch_model(self):
        model_sd = self.model.state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
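    """Clone the model and clip patchers and add the LoRA patches to both."""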
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


class CLIP:
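    """Text encoder wrapper holding the tokenizer, cond stage model and patcher.

    config["target"] selects between the SD1.x (FrozenCLIPEmbedder) and SD2.x
    (FrozenOpenCLIPEmbedder) implementations.
    """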
    def __init__(self, config={}, embedding_directory=None, no_init=False):
        if no_init:
            return
        self.target_clip = config["target"]
        if "params" in config:
            params = config["params"]
        else:
            params = {}

        if self.target_clip == "ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder":
            clip = sd2_clip.SD2ClipModel
            tokenizer = sd2_clip.SD2Tokenizer
        elif self.target_clip == "ldm.modules.encoders.modules.FrozenCLIPEmbedder":
            clip = sd1_clip.SD1ClipModel
            tokenizer = sd1_clip.SD1Tokenizer

        self.cond_stage_model = clip(**(params))
        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.target_clip = self.target_clip
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e
        if return_pooled:
            eos_token_index = max(range(len(tokens[0])), key=tokens[0].__getitem__)
            pooled = cond[:, eos_token_index]
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

class VAE:
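    """AutoencoderKL wrapper that moves images to and from latent space.

    decode() falls back to tiled decoding when it runs out of memory;
    decode_tiled()/encode_tiled() can also be called directly.
    """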
    def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss", ckpt_path=ckpt_path)
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']), ckpt_path=ckpt_path)
        self.first_stage_model = self.first_stage_model.eval()
        self.scale_factor = scale_factor
        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(1. / self.scale_factor * a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1).to(self.device)
        samples = self.first_stage_model.encode(2. * pixel_samples - 1.).sample() * self.scale_factor
        self.first_stage_model = self.first_stage_model.cpu()
        samples = samples.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1).to(self.device)

        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        samples = utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        self.first_stage_model = self.first_stage_model.cpu()
        samples = samples.cpu()
        return samples

def resize_image_to(tensor, target_latent_tensor, batched_number):
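    """Upscale a hint image to the pixel size of the target latent (8x its
    spatial dims) and repeat it to match the latent batch size."""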
    tensor = utils.common_upscale(tensor, target_latent_tensor.shape[3] * 8, target_latent_tensor.shape[2] * 8, 'nearest-exact', "center")
    target_batch_size = target_latent_tensor.shape[0]

    current_batch_size = tensor.shape[0]
    print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

class ControlNet:
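    """Runs a ControlNet to produce residuals that get added inside the UNet.

    Controls can be chained via set_previous_controlnet(); get_control()
    returns a dict of 'output' and 'middle' residuals (plus any 'input'
    residuals passed through from the previous control).
    """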
    def __init__(self, control_model, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = resize_image_to(self.cond_hint_original, x_noisy, batched_number).to(self.control_model.dtype).to(self.device)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
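    """Load a ControlNet checkpoint for SD1.x or SD2.x.

    Falls back to load_t2i_adapter() when the file does not look like a
    ControlNet. "difference" checkpoints are merged with the weights of the
    supplied model before loading.
    """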
    controlnet_data = utils.load_torch_file(ckpt_path)
    pth_key = 'control_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    pth = False
    sd2 = False
    key = 'input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
    elif key in controlnet_data:
        pass
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    context_dim = controlnet_data[key].shape[1]

    use_fp16 = False
    if model_management.should_use_fp16() and controlnet_data[key].dtype == torch.float16:
        use_fp16 = True

    if context_dim == 768:
        #SD1.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_heads=8,
                                        use_spatial_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=True,
                                        legacy=False,
                                        use_fp16=use_fp16)
    else:
        #SD2.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_head_channels=64,
                                        use_spatial_transformer=True,
                                        use_linear_in_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=True,
                                        legacy=False,
                                        use_fp16=use_fp16)
    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "model.diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        w.load_state_dict(controlnet_data, strict=False)
    else:
        control_model.load_state_dict(controlnet_data, strict=False)

    if use_fp16:
        control_model = control_model.half()

    control = ControlNet(control_model)
    return control

class T2IAdapter:
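    """T2I-Adapter control: runs the adapter once per hint image and feeds its
    features to the UNet input blocks, chainable like ControlNet."""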
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = resize_image_to(self.cond_hint_original, x_noisy, batched_number).float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
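    """Build an Adapter or Adapter_light from a t2i adapter state dict.

    Returns None when the keys are not recognized.
    """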
    keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        model_ad = adapter.Adapter(cin=cin, channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
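    """Style adapter wrapper that turns CLIP vision output into conditioning."""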
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_path, embedding_directory=None):
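    """Load a standalone CLIP text encoder checkpoint, picking the SD1.x or
    SD2.x implementation based on the keys present."""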
    clip_data = utils.load_torch_file(ckpt_path)
    config = {}
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
    else:
        config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
    clip = CLIP(config=config, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
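    """Load a checkpoint using an explicit yaml config file (LDM style)."""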
    with open(config_path, 'r') as stream:
        config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            if "use_fp16" in model_config_params["unet_config"]["params"]:
                fp16 = model_config_params["unet_config"]["params"]["use_fp16"]

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE(scale_factor=scale_factor, config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

    model = instantiate_from_config(config["model"])
    sd = utils.load_torch_file(ckpt_path)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
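    """Load a checkpoint and guess its config from the state dict.

    Detects SD1.x vs SD2.x, inpainting and unCLIP variants, v-prediction and
    fp16, then builds the LatentDiffusion config accordingly.
    """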
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE()
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip_config = {}
        if "cond_stage_model.model.transformer.resblocks.22.attn.out_proj.weight" in sd_keys:
            clip_config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
        else:
            clip_config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

    clipvision_key = "embedder.model.visual.transformer.resblocks.0.attn.in_proj_weight"
    noise_aug_config = None
    if clipvision_key in sd_keys:
        size = sd[clipvision_key].shape[1]

        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd)

        noise_aug_key = "noise_augmentor.betas"
        if noise_aug_key in sd_keys:
            noise_aug_config = {}
            params = {}
            noise_schedule_config = {}
            noise_schedule_config["timesteps"] = sd[noise_aug_key].shape[0]
            noise_schedule_config["beta_schedule"] = "squaredcos_cap_v2"
            params["noise_schedule_config"] = noise_schedule_config
            noise_aug_config['target'] = "ldm.modules.encoders.noise_aug_modules.CLIPEmbeddingNoiseAugmentation"
            if size == 1280: #h
                params["timestep_dim"] = 1024
            elif size == 1024: #l
                params["timestep_dim"] = 768
            noise_aug_config['params'] = params

    sd_config = {
        "linear_start": 0.00085,
        "linear_end": 0.012,
        "num_timesteps_cond": 1,
        "log_every_t": 200,
        "timesteps": 1000,
        "first_stage_key": "jpg",
        "cond_stage_key": "txt",
        "image_size": 64,
        "channels": 4,
        "cond_stage_trainable": False,
        "monitor": "val/loss_simple_ema",
        "scale_factor": 0.18215,
        "use_ema": False,
    }

    unet_config = {
        "use_checkpoint": True,
        "image_size": 32,
        "out_channels": 4,
        "attention_resolutions": [
            4,
            2,
            1
        ],
        "num_res_blocks": 2,
        "channel_mult": [
            1,
            2,
            4,
            4
        ],
        "use_spatial_transformer": True,
        "transformer_depth": 1,
        "legacy": False
    }

    if len(sd['model.diffusion_model.input_blocks.1.1.proj_in.weight'].shape) == 2:
        unet_config['use_linear_in_transformer'] = True

    unet_config["use_fp16"] = fp16
    unet_config["model_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[0]
    unet_config["in_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
    unet_config["context_dim"] = sd['model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'].shape[1]

    sd_config["unet_config"] = {"target": "ldm.modules.diffusionmodules.openaimodel.UNetModel", "params": unet_config}
    model_config = {"target": "ldm.models.diffusion.ddpm.LatentDiffusion", "params": sd_config}

    if noise_aug_config is not None: #SD2.x unclip model
        sd_config["noise_aug_config"] = noise_aug_config
        sd_config["image_size"] = 96
        sd_config["embedding_dropout"] = 0.25
        sd_config["conditioning_key"] = 'crossattn-adm'
        model_config["target"] = "ldm.models.diffusion.ddpm.ImageEmbeddingConditionedLatentDiffusion"
    elif unet_config["in_channels"] > 4: #inpainting model
        sd_config["conditioning_key"] = "hybrid"
        sd_config["finetune_keys"] = None
        model_config["target"] = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
    else:
        sd_config["conditioning_key"] = "crossattn"

    if unet_config["context_dim"] == 1024:
        unet_config["num_head_channels"] = 64 #SD2.x
    else:
        unet_config["num_heads"] = 8 #SD1.x

    unclip = 'model.diffusion_model.label_emb.0.0.weight'
    if unclip in sd_keys:
        unet_config["num_classes"] = "sequential"
        unet_config["adm_in_channels"] = sd[unclip].shape[1]

    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
        out = sd[k]
        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
            sd_config["parameterization"] = 'v'

    model = instantiate_from_config(model_config)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae, clipvision)