# sd.py
import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip

def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
}

transformer_lora_blocks = {
    "transformer_blocks.{}.attn1.to_q": "transformer_blocks_{}_attn1_to_q",
    "transformer_blocks.{}.attn1.to_k": "transformer_blocks_{}_attn1_to_k",
    "transformer_blocks.{}.attn1.to_v": "transformer_blocks_{}_attn1_to_v",
    "transformer_blocks.{}.attn1.to_out.0": "transformer_blocks_{}_attn1_to_out_0",
    "transformer_blocks.{}.attn2.to_q": "transformer_blocks_{}_attn2_to_q",
    "transformer_blocks.{}.attn2.to_k": "transformer_blocks_{}_attn2_to_k",
    "transformer_blocks.{}.attn2.to_v": "transformer_blocks_{}_attn2_to_v",
    "transformer_blocks.{}.attn2.to_out.0": "transformer_blocks_{}_attn2_to_out_0",
    "transformer_blocks.{}.ff.net.0.proj": "transformer_blocks_{}_ff_net_0_proj",
    "transformer_blocks.{}.ff.net.2": "transformer_blocks_{}_ff_net_2",
}

for i in range(10):
    for k in transformer_lora_blocks:
        LORA_UNET_MAP_ATTENTIONS[k.format(i)] = transformer_lora_blocks[k].format(i)


LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
    lora = utils.load_torch_file(path, safe_load=True)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict
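# For reference, the patch tuples built above are later unpacked by
# ModelPatcher.patch_model, which dispatches on their length:
#   len 4: (lora_up, lora_down, alpha, lora_mid)           -> lora / locon
#   len 7: (w1_a, w1_b, alpha, w2_a, w2_b, t1, t2)         -> loha
#   len 8: (w1, w2, alpha, w1_a, w1_b, w2_a, w2_b, t2)     -> lokr
# (patch_model also accepts a 1-tuple holding a plain weight diff.)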

def model_lora_keys(model, key_map=None):
    if key_map is None: #NOTE: a mutable default dict would be shared between calls
        key_map = {}
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k


    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map

class ModelPatcher:
    def __init__(self, model, size=0):
        self.size = size
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.size)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_tomesd(self, ratio):
        self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)

    def model_dtype(self):
        return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = {}
        for k in patches:
            if k in self.model_keys:
                p[k] = patches[k]
        self.patches += [(strength_patch, p, strength_model)]
        return p.keys()

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self):
        model_sd = self.model_state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]
                strength_model = p[2]

                if strength_model != 1.0:
                    weight *= strength_model

                if len(v) == 1:
                    weight += alpha * (v[0]).type(weight.dtype).to(weight.device)
                elif len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model

    def unpatch_model(self):
        model_sd = self.model_state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}
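# Sketch of the intended ModelPatcher lifecycle (illustrative only; diffusion_model
# and unet_patches are hypothetical stand-ins, the latter being a
# {weight_key: patch_tuple} dict such as load_lora returns):
#
#   patcher = ModelPatcher(diffusion_model)
#   patcher.add_patches(unet_patches, strength_patch=1.0)
#   try:
#       patcher.patch_model()    # weights edited in place, originals saved in backup
#       ...                      # run the patched model
#   finally:
#       patcher.unpatch_model()  # restore the original weights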

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)
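# Illustrative call (the lora path is hypothetical; model and clip are the objects
# returned by load_checkpoint / load_checkpoint_guess_config below):
#
#   model_lora, clip_lora = load_lora_for_models(model, clip, "loras/example.safetensors", 1.0, 1.0)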


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params
        clip = target.clip
        tokenizer = target.tokenizer

        self.device = model_management.text_encoder_device()
        params["device"] = self.device
        self.cond_stage_model = clip(**(params))
        self.cond_stage_model = self.cond_stage_model.to(self.device)

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        n.device = self.device
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e
        cond_out = cond
        if return_pooled:
            return cond_out, pooled
        return cond_out

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)
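# Minimal usage sketch, assuming "clip" is a CLIP instance returned by one of the
# checkpoint loaders below (the prompt text is arbitrary):
#
#   cond = clip.encode("a photo of a cat")
#   tokens = clip.tokenize("a photo of a cat")
#   cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)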


class VAE:
    def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        self.scale_factor = scale_factor
        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(1. / self.scale_factor * a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.device) - 1.).sample() * self.scale_factor
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)

            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu() * self.scale_factor

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return samples

def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)
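# Worked example of the broadcast above (values are illustrative): with a 2-image
# hint, target_batch_size=6 and batched_number=2 (cond + uncond), per_batch is 3,
# the 2 hint images are first extended to 3 by appending the first image again,
# then that block is concatenated batched_number times to give the 6 hints expected.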

class ControlNet:
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    use_fp16 = model_management.should_use_fp16()

    controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control
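# Illustrative usage (the path and hint tensor are hypothetical; pass the current
# ModelPatcher as model= when the checkpoint stores "difference" weights):
#
#   control = load_controlnet("controlnet/control_canny.safetensors")
#   control = control.set_cond_hint(hint_image, strength=0.8)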

class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_path, embedding_directory=None):
    clip_data = utils.load_torch_file(ckpt_path, safe_load=True)
    #CLIP() takes a target object with params/clip/tokenizer (see load_checkpoint),
    #so build one here instead of the old config dict it used to receive.
    class EmptyClass:
        pass
    clip_target = EmptyClass()
    clip_target.params = {}
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        clip_target.clip = sd2_clip.SD2ClipModel
        clip_target.tokenizer = sd2_clip.SD2Tokenizer
    else:
        clip_target.clip = sd1_clip.SD1ClipModel
        clip_target.tokenizer = sd1_clip.SD1Tokenizer
    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    v_prediction = False

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            v_prediction = True

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(unet_config, v_prediction=v_prediction)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(unet_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(unet_config, v_prediction=v_prediction)

    if fp16:
        model = model.half()

    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(scale_factor=scale_factor, config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        class EmptyClass:
            pass
        clip_target = EmptyClass()
        clip_target.params = clip_config["params"]
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix)

    model = model_config.get_model(sd)
    model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae = VAE(scale_factor=model_config.vae_scale_factor)
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    return (ModelPatcher(model), clip, vae, clipvision)
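# Illustrative end-to-end load (checkpoint path, prompt and image tensor are
# hypothetical; vae.encode expects a [batch, height, width, channel] float image
# in 0..1 and vae.decode returns pixels in the same layout):
#
#   model, clip, vae, _ = load_checkpoint_guess_config("checkpoints/sd15.safetensors",
#                                                      embedding_directory="embeddings")
#   cond = clip.encode("a photo of a cat")
#   latent = vae.encode(image)
#   pixels = vae.decode(latent)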