import torch
import contextlib
import copy
import inspect

from . import sd1_clip
from . import sd2_clip
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base

def load_model_weights(model, sd, verbose=False, load_state_dict_to=[]):
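    # Remaps legacy "model.diffusion_model." / CLIP key prefixes in the checkpoint state dict,
    # loads it into the diffusion model (strict=False), and feeds the same dict to every extra
    # module in load_state_dict_to (e.g. VAE / CLIP weight loaders).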
    replace_prefix = {"model.diffusion_model.": "diffusion_model."}
    for rp in replace_prefix:
        replace = list(map(lambda a: (a, "{}{}".format(replace_prefix[rp], a[len(rp):])), filter(lambda a: a.startswith(rp), sd.keys())))
        for x in replace:
            sd[x[1]] = sd.pop(x[0])

    m, u = model.load_state_dict(sd, strict=False)

    k = list(sd.keys())
    for x in k:
        # print(x)
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model", "cond_stage_model.transformer.text_model", 24)

    for x in load_state_dict_to:
        x.load_state_dict(sd, strict=False)

    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.eval()
    return model

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
    "transformer_blocks.0.attn1.to_q": "transformer_blocks_0_attn1_to_q",
    "transformer_blocks.0.attn1.to_k": "transformer_blocks_0_attn1_to_k",
    "transformer_blocks.0.attn1.to_v": "transformer_blocks_0_attn1_to_v",
    "transformer_blocks.0.attn1.to_out.0": "transformer_blocks_0_attn1_to_out_0",
    "transformer_blocks.0.attn2.to_q": "transformer_blocks_0_attn2_to_q",
    "transformer_blocks.0.attn2.to_k": "transformer_blocks_0_attn2_to_k",
    "transformer_blocks.0.attn2.to_v": "transformer_blocks_0_attn2_to_v",
    "transformer_blocks.0.attn2.to_out.0": "transformer_blocks_0_attn2_to_out_0",
    "transformer_blocks.0.ff.net.0.proj": "transformer_blocks_0_ff_net_0_proj",
    "transformer_blocks.0.ff.net.2": "transformer_blocks_0_ff_net_2",
}

LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
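    # Collects weight patches for every mapped key, supporting plain lora/locon (lora_up/lora_down),
    # loha (hada_*) and lokr (lokr_*) layouts; returns {model weight key: patch tensors} and
    # prints any keys in the file it could not match.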
    lora = utils.load_torch_file(path, safe_load=True)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)


        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys(model, key_map=None):
    # avoid sharing a mutable default dict between calls
    if key_map is None:
        key_map = {}
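    # Maps LoRA naming (lora_unet_* / lora_te_*) to this model's state_dict keys for the UNet
    # attention and resnet blocks and the CLIP text encoder layers.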
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k


    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map


class ModelPatcher:
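    # Wraps a model together with a list of weight patches (LoRA deltas, etc.): patch_model()
    # applies them in place while keeping backups so unpatch_model() can restore the originals.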
    def __init__(self, model, size=0):
        self.size = size
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.size)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        return n

    def set_model_tomesd(self, ratio):
        self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)

    def model_dtype(self):
        return self.model.get_dtype()

    def add_patches(self, patches, strength=1.0):
        p = {}
        model_sd = self.model.state_dict()
        for k in patches:
            if k in model_sd:
                p[k] = patches[k]
        self.patches += [(strength, p)]
        return p.keys()

    def patch_model(self):
        model_sd = self.model.state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]

                if len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model
    def unpatch_model(self):
        model_sd = self.model.state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


class CLIP:
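    # Wrapper around the text encoder: holds the tokenizer, a ModelPatcher for LoRA patches and
    # the clip-skip layer index, and turns prompts into conditioning tensors.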
    def __init__(self, config={}, embedding_directory=None, no_init=False):
        if no_init:
            return
        self.target_clip = config["target"]
        if "params" in config:
            params = config["params"]
        else:
            params = {}

        if self.target_clip.endswith("FrozenOpenCLIPEmbedder"):
            clip = sd2_clip.SD2ClipModel
            tokenizer = sd2_clip.SD2Tokenizer
        elif self.target_clip.endswith("FrozenCLIPEmbedder"):
            clip = sd1_clip.SD1ClipModel
            tokenizer = sd1_clip.SD1Tokenizer

        self.device = model_management.text_encoder_device()
        params["device"] = self.device
        self.cond_stage_model = clip(**(params))
        self.cond_stage_model = self.cond_stage_model.to(self.device)

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.target_clip = self.target_clip
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e
        if return_pooled:
            eos_token_index = max(range(len(tokens[0])), key=tokens[0].__getitem__)
            pooled = cond[:, eos_token_index]
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

class VAE:
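    # Wraps the first-stage AutoencoderKL: encode()/decode() batch work by available memory and
    # fall back to tiled processing when a full pass runs out of VRAM.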
    def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        self.scale_factor = scale_factor
        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(1. / self.scale_factor * a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.device) - 1.).sample() * self.scale_factor
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu() * self.scale_factor

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return samples

def broadcast_image_to(tensor, target_batch_size, batched_number):
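    # Repeats or trims the hint batch so it matches the latent batch size, keeping the
    # cond/uncond grouping implied by batched_number.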
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

class ControlNet:
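    # Holds a controlnet model and its hint image: get_control() upscales the hint to the latent
    # resolution, runs the controlnet and merges the result with any chained previous controlnet.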
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
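    # Detects the checkpoint layout (.pth "control_model." prefix vs. standalone keys, SD1.x vs.
    # SD2.x via context_dim), falls back to t2i adapter loading, and adds the supplied model's
    # weights when loading a "difference" controlnet.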
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    pth = False
    sd2 = False
    key = 'input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
    elif key in controlnet_data:
        pass
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    context_dim = controlnet_data[key].shape[1]

    use_fp16 = False
    if model_management.should_use_fp16() and controlnet_data[key].dtype == torch.float16:
        use_fp16 = True

    if context_dim == 768:
        #SD1.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_heads=8,
                                        use_spatial_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=False,
                                        legacy=False,
                                        use_fp16=use_fp16)
    else:
        #SD2.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_head_channels=64,
                                        use_spatial_transformer=True,
                                        use_linear_in_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=False,
                                        legacy=False,
                                        use_fp16=use_fp16)
    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        w.load_state_dict(controlnet_data, strict=False)
    else:
        control_model.load_state_dict(controlnet_data, strict=False)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

class T2IAdapter:
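    # Same control interface as ControlNet but for T2I-Adapter models: the adapter runs once on
    # the hint image and its features are added to the UNet input blocks.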
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
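    # Picks the adapter architecture (Adapter_light vs. Adapter) from the checkpoint keys and
    # infers the hint channel count from the first conv weight.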
    keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        model_ad = adapter.Adapter(cin=cin, channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_path, embedding_directory=None):
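    # Loads a standalone text encoder checkpoint, choosing the SD2.x (open CLIP) or SD1.x config
    # from which transformer keys are present.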
    clip_data = utils.load_torch_file(ckpt_path, safe_load=True)
    config = {}
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
    else:
        config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenCLIPEmbedder'
    clip = CLIP(config=config, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
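    # Loads a checkpoint with an explicit yaml config: builds the VAE/CLIP from the config,
    # selects the model class from the config target and loads all weights via load_model_weights.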
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    v_prediction = False

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            v_prediction = True

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE(scale_factor=scale_factor, config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(unet_config, v_prediction=v_prediction)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(unet_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(unet_config, v_prediction=v_prediction)

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)
    model = load_model_weights(model, state_dict, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
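    # Loads a checkpoint without a config file: infers the UNet layout, fp16, inpaint/unCLIP
    # variants and v-prediction directly from the state dict keys and tensor shapes.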
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE()
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip_config = {}
        if "cond_stage_model.model.transformer.resblocks.22.attn.out_proj.weight" in sd_keys:
            clip_config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
        else:
            clip_config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenCLIPEmbedder'
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

    clipvision_key = "embedder.model.visual.transformer.resblocks.0.attn.in_proj_weight"
    noise_aug_config = None
    if clipvision_key in sd_keys:
        size = sd[clipvision_key].shape[1]

        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd)

        noise_aug_key = "noise_augmentor.betas"
        if noise_aug_key in sd_keys:
            noise_aug_config = {}
            params = {}
            noise_schedule_config = {}
            noise_schedule_config["timesteps"] = sd[noise_aug_key].shape[0]
            noise_schedule_config["beta_schedule"] = "squaredcos_cap_v2"
            params["noise_schedule_config"] = noise_schedule_config
            noise_aug_config['target'] = "comfy.ldm.modules.encoders.noise_aug_modules.CLIPEmbeddingNoiseAugmentation"
            if size == 1280: #h
                params["timestep_dim"] = 1024
            elif size == 1024: #l
                params["timestep_dim"] = 768
            noise_aug_config['params'] = params

    sd_config = {
        "linear_start": 0.00085,
        "linear_end": 0.012,
        "num_timesteps_cond": 1,
        "log_every_t": 200,
        "timesteps": 1000,
        "first_stage_key": "jpg",
        "cond_stage_key": "txt",
        "image_size": 64,
        "channels": 4,
        "cond_stage_trainable": False,
        "monitor": "val/loss_simple_ema",
        "scale_factor": 0.18215,
        "use_ema": False,
    }

    unet_config = {
        "use_checkpoint": False,
        "image_size": 32,
        "out_channels": 4,
        "attention_resolutions": [
            4,
            2,
            1
        ],
        "num_res_blocks": 2,
        "channel_mult": [
            1,
            2,
            4,
            4
        ],
        "use_spatial_transformer": True,
        "transformer_depth": 1,
        "legacy": False
    }

    if len(sd['model.diffusion_model.input_blocks.4.1.proj_in.weight'].shape) == 2:
        unet_config['use_linear_in_transformer'] = True

    unet_config["use_fp16"] = fp16
    unet_config["model_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[0]
    unet_config["in_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
    unet_config["context_dim"] = sd['model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight'].shape[1]

    sd_config["unet_config"] = {"target": "comfy.ldm.modules.diffusionmodules.openaimodel.UNetModel", "params": unet_config}

    unclip_model = False
    inpaint_model = False
    if noise_aug_config is not None: #SD2.x unclip model
        sd_config["noise_aug_config"] = noise_aug_config
        sd_config["image_size"] = 96
        sd_config["embedding_dropout"] = 0.25
        sd_config["conditioning_key"] = 'crossattn-adm'
        unclip_model = True
    elif unet_config["in_channels"] > 4: #inpainting model
        sd_config["conditioning_key"] = "hybrid"
        sd_config["finetune_keys"] = None
        inpaint_model = True
    else:
        sd_config["conditioning_key"] = "crossattn"

    if unet_config["context_dim"] == 768:
        unet_config["num_heads"] = 8 #SD1.x
    else:
        unet_config["num_head_channels"] = 64 #SD2.x

    unclip = 'model.diffusion_model.label_emb.0.0.weight'
    if unclip in sd_keys:
        unet_config["num_classes"] = "sequential"
        unet_config["adm_in_channels"] = sd[unclip].shape[1]

    v_prediction = False
    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
        out = sd[k]
        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
            v_prediction = True
            sd_config["parameterization"] = 'v'

    if inpaint_model:
        model = model_base.SDInpaint(unet_config, v_prediction=v_prediction)
    elif unclip_model:
        model = model_base.SD21UNCLIP(unet_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(unet_config, v_prediction=v_prediction)

    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    return (ModelPatcher(model), clip, vae, clipvision)