import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml

from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    #pop every consumed key from sd so its tensor can be freed as loading progresses
    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    #convert open_clip style "cond_stage_model.model." weights (SD2.x checkpoints) to the transformers layout
    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
}

transformer_lora_blocks = {
    "transformer_blocks.{}.attn1.to_q": "transformer_blocks_{}_attn1_to_q",
    "transformer_blocks.{}.attn1.to_k": "transformer_blocks_{}_attn1_to_k",
    "transformer_blocks.{}.attn1.to_v": "transformer_blocks_{}_attn1_to_v",
    "transformer_blocks.{}.attn1.to_out.0": "transformer_blocks_{}_attn1_to_out_0",
    "transformer_blocks.{}.attn2.to_q": "transformer_blocks_{}_attn2_to_q",
    "transformer_blocks.{}.attn2.to_k": "transformer_blocks_{}_attn2_to_k",
    "transformer_blocks.{}.attn2.to_v": "transformer_blocks_{}_attn2_to_v",
    "transformer_blocks.{}.attn2.to_out.0": "transformer_blocks_{}_attn2_to_out_0",
    "transformer_blocks.{}.ff.net.0.proj": "transformer_blocks_{}_ff_net_0_proj",
    "transformer_blocks.{}.ff.net.2": "transformer_blocks_{}_ff_net_2",
}

for i in range(10):
    for k in transformer_lora_blocks:
        LORA_UNET_MAP_ATTENTIONS[k.format(i)] = transformer_lora_blocks[k].format(i)
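
# The loop above expands the per-block templates for up to 10 transformer
# blocks, e.g. "transformer_blocks.0.attn1.to_q" -> "transformer_blocks_0_attn1_to_q",
# matching the key style lora files use for these layers.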


LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
    lora = utils.load_torch_file(path, safe_load=True)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        ######## lora
        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)

        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict
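
# Shape of the returned patch_dict (illustrative; the key below is made up):
# regular lora entries are 4-tuples, loha entries 7-tuples and lokr entries
# 8-tuples, which is how ModelPatcher.patch_model tells the formats apart:
#
#   patch_dict["diffusion_model.middle_block.1.proj_in.weight"] = (up, down, alpha, mid)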

def model_lora_keys(model, key_map=None):
    if key_map is None: #avoid a shared mutable default argument
        key_map = {}
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k

    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map
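
# key_map maps lora-file key prefixes (e.g. "lora_unet_mid_block_attentions_0_proj_in")
# to the matching model state dict keys; load_lora takes it as its to_load argument.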

class ModelPatcher:
    def __init__(self, model, size=0):
        self.size = size
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.size)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = {}
        for k in patches:
            if k in self.model_keys:
                p[k] = patches[k]
        self.patches += [(strength_patch, p, strength_model)]
        return p.keys()

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self):
        model_sd = self.model_state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                #store the unpatched weight once so unpatch_model can restore it
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]
                strength_model = p[2]

                if strength_model != 1.0:
                    weight *= strength_model

                if len(v) == 1: #plain offset tensor added directly to the weight
                    w1 = v[0]
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
                elif len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model

    def unpatch_model(self):
        model_sd = self.model_state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}
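
# The merge math in patch_model, in brief: a lora entry (up, down, alpha, mid)
# updates a weight as W += strength * (alpha / rank) * up @ down, where
# rank = down.shape[0]; loha multiplies two such products elementwise and
# lokr uses a Kronecker product. A minimal sketch of the plain lora case:
#
#   W = torch.zeros(8, 8)
#   up, down = torch.randn(8, 4), torch.randn(4, 8)
#   alpha, rank = 4.0, down.shape[0]
#   W += (alpha / rank) * torch.mm(up, down)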

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)
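
# Hypothetical usage (the path is made up):
#
#   model_l, clip_l = load_lora_for_models(model, clip, "loras/example.safetensors", 1.0, 1.0)
#
# Both returned objects are clones, so the originals keep their patch lists unchanged.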


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params
        clip = target.clip
        tokenizer = target.tokenizer

        self.device = model_management.text_encoder_device()
        params["device"] = self.device
        self.cond_stage_model = clip(**(params))
        self.cond_stage_model = self.cond_stage_model.to(self.device)

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        n.device = self.device
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e

        cond_out = cond
        if return_pooled:
            return cond_out, pooled
        return cond_out

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

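# Typical flow (sketch): optionally select a CLIP skip layer, then tokenize
# and encode:
#
#   clip.clip_layer(-2)
#   cond = clip.encode("a photo of a cat")
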
class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.device) - 1.).sample()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return samples

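# The tiled helpers above run three passes with different tile aspect ratios
# and average the results, trading 3x the work for less visible tile seams;
# the 8x factor is the SD VAE's latent-to-pixel scale.
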
def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)
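
# Shape example: with target_batch_size=6 and batched_number=2 (cond/uncond),
# a (3, ...) hint is trimmed/repeated to per_batch=3 images and then tiled
# once per batch to (6, ...).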

class ControlNet:
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out
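
# ControlNets chain via set_previous_controlnet; get_control walks the chain
# and sums overlapping residuals. Hypothetical setup sketch (path made up):
#
#   cn = load_controlnet("controlnets/canny.safetensors")
#   cn = cn.set_cond_hint(hint_image, strength=0.8)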

def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    use_fp16 = model_management.should_use_fp16()

    controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            #'difference' checkpoints store the controlnet as offsets from the base
            #model weights, so the base weights have to be added back in first.
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)
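
# cin counts channels after the adapter's 8x pixel-unshuffle (64 values per
# image channel), so cin // 64 recovers the number of image channels the
# adapter takes (e.g. 1 for sketch style adapters, 3 for color ones).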

class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            #32 layer text encoder: SDXL refiner
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            #24 layer text encoder: SD2.x
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            #12 layer text encoder: SD1.x
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip

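# Sketch (hypothetical paths): one file loads a single text encoder, two files
# load the dual-encoder SDXL model:
#
#   clip = load_clip(["clip/clip_l.safetensors", "clip/clip_g.safetensors"])
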
def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    v_prediction = False

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            v_prediction = True

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, v_prediction=v_prediction)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(model_config, v_prediction=v_prediction)

    if fp16:
        model = model.half()

    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    model = model_config.get_model(sd)
    model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    return (ModelPatcher(model), clip, vae, clipvision)
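
# Hypothetical usage (the path is made up):
#
#   model, clip, vae, clipvision = load_checkpoint_guess_config("checkpoints/v1-5.safetensors", embedding_directory="embeddings")
#
# The model comes back wrapped in a ModelPatcher, so loras and other patches
# can be applied and reverted without permanently altering the weights.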