import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip

def load_model_weights(model, sd):
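    # Load the state dict into the model non-strictly, free tensors that were
    # consumed, and report any keys the model expected but did not receive.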
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
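    # Remap SD1.x keys into the transformer.text_model namespace, round
    # position_ids that were stored as floats, and convert SD2.x (open_clip)
    # weights to the transformers layout before loading.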
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
}

transformer_lora_blocks = {
    "transformer_blocks.{}.attn1.to_q": "transformer_blocks_{}_attn1_to_q",
    "transformer_blocks.{}.attn1.to_k": "transformer_blocks_{}_attn1_to_k",
    "transformer_blocks.{}.attn1.to_v": "transformer_blocks_{}_attn1_to_v",
    "transformer_blocks.{}.attn1.to_out.0": "transformer_blocks_{}_attn1_to_out_0",
    "transformer_blocks.{}.attn2.to_q": "transformer_blocks_{}_attn2_to_q",
    "transformer_blocks.{}.attn2.to_k": "transformer_blocks_{}_attn2_to_k",
    "transformer_blocks.{}.attn2.to_v": "transformer_blocks_{}_attn2_to_v",
    "transformer_blocks.{}.attn2.to_out.0": "transformer_blocks_{}_attn2_to_out_0",
    "transformer_blocks.{}.ff.net.0.proj": "transformer_blocks_{}_ff_net_0_proj",
    "transformer_blocks.{}.ff.net.2": "transformer_blocks_{}_ff_net_2",
}

for i in range(10):
    for k in transformer_lora_blocks:
        LORA_UNET_MAP_ATTENTIONS[k.format(i)] = transformer_lora_blocks[k].format(i)


LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
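    # Build a patch dict for every model key in to_load. Supports plain LoRA
    # up/down pairs (optionally with LoCon mid weights), LoHa (hada_*) and
    # LoKr (lokr_*) layouts; keys that end up unused are reported at the end.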
    lora = utils.load_torch_file(path, safe_load=True)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)


        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys(model, key_map={}):
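    # Map LoRA key names (the "lora_unet_..." / "lora_te_..." naming) to this
    # model's state dict keys: UNet attention blocks, CLIP text encoder layers,
    # and LoCon resnet/downsampler/upsampler convolutions.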
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k


    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map


class ModelPatcher:
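    # Wraps a model together with a list of weight patches (e.g. LoRA deltas)
    # that can be applied in place with patch_model() and reverted with
    # unpatch_model().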
    def __init__(self, model, size=0):
        self.size = size
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.size)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_tomesd(self, ratio):
        self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)

    def model_dtype(self):
        return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = {}
        for k in patches:
            if k in self.model_keys:
                p[k] = patches[k]
        self.patches += [(strength_patch, p, strength_model)]
        return p.keys()

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self):
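        # Apply every queued patch directly to the weights, backing up each
        # original tensor so unpatch_model() can restore it. Patch tuple
        # layouts: len 1 = plain diff, len 4 = lora/locon, len 8 = lokr,
        # anything else = loha.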
        model_sd = self.model_state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]
                strength_model = p[2]

                if strength_model != 1.0:
                    weight *= strength_model
                if len(v) == 1:
                    w1 = v[0]
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
                elif len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model

    def unpatch_model(self):
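        # Restore every backed-up tensor in place and clear the backups.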
        model_sd = self.model_state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params
        clip = target.clip
        tokenizer = target.tokenizer

        self.device = model_management.text_encoder_device()
        params["device"] = self.device
        self.cond_stage_model = clip(**(params))
        self.cond_stage_model = self.cond_stage_model.to(self.device)

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        n.device = self.device
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx
    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)
    def encode_from_tokens(self, tokens, return_pooled=False):
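        # Select the requested CLIP layer if one was set, apply pending weight
        # patches, encode the weighted tokens, and always unpatch afterwards
        # (even if encoding raises).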
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e

        cond_out = cond
        if return_pooled:
            return cond_out, pooled
        return cond_out
    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.device) - 1.).sample()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
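        # Decode latents in batches sized from the currently free memory (the
        # per-sample constant is an empirical estimate); falls back to tiled
        # decoding when a regular decode runs out of memory.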
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return samples

def broadcast_image_to(tensor, target_batch_size, batched_number):
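    # Resize a conditioning image batch to match the latent batch size; the
    # latent batch is made of batched_number groups (e.g. cond and uncond),
    # so the hint is repeated or trimmed per group.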
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

class ControlNet:
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
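        # Upscale the hint to match the current latent size, run the ControlNet,
        # and return its residuals keyed by 'output' and 'middle', scaled by
        # self.strength and merged with any chained controlnet's outputs.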
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
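    # Load a ControlNet checkpoint, either the original layout prefixed with
    # "control_model." (.pth style, possibly a "difference" checkpoint applied
    # on top of the given model) or a bare state dict; falls back to T2I
    # adapter loading when no ControlNet keys are found.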
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net
    use_fp16 = model_management.should_use_fp16()

    controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)
    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond, batched_number):
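        # Run the adapter once per hint resolution and cache the features in
        # self.control_input; they are returned on the 'input' side, padded
        # with None placeholders so they line up with the UNet input blocks.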
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
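    # Detect the adapter variant from the state dict keys (Adapter_light vs.
    # the full Adapter, with kernel size and conv downsampling inferred from
    # the weights) and wrap it in a T2IAdapter.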
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_path, embedding_directory=None):
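    # CLIP() in this file expects a "target" object exposing .params, .clip and
    # .tokenizer (the same interface load_checkpoint builds below), so the text
    # encoder class is chosen from the state dict keys.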
    clip_data = utils.load_torch_file(ckpt_path, safe_load=True)

    class EmptyClass:
        pass
    clip_target = EmptyClass()
    clip_target.params = {}
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        clip_target.clip = sd2_clip.SD2ClipModel
        clip_target.tokenizer = sd2_clip.SD2Tokenizer
    else:
        clip_target.clip = sd1_clip.SD1ClipModel
        clip_target.tokenizer = sd1_clip.SD1Tokenizer
    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    v_prediction = False

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            v_prediction = True
    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)
    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, v_prediction=v_prediction)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(model_config, v_prediction=v_prediction)
    if fp16:
        model = model.half()

    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
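    # Detect the model type from the checkpoint's UNet weights, then build and
    # load the diffusion model plus (optionally) the VAE, CLIP and CLIP vision
    # models from the same state dict, reporting any leftover keys.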
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    model = model_config.get_model(sd)
    model.load_model_weights(sd, "model.diffusion_model.")
    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)
    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)
    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)
    return (ModelPatcher(model), clip, vae, clipvision)