import torch
import contextlib
import copy
import inspect

from comfy import model_management

from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml

from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)
LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}


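# load_lora maps raw LoRA-file tensors onto model keys. Each value of the
# returned patch_dict is a tuple whose length selects the patch type applied
# later in ModelPatcher.calculate_weight:
#   4 entries: (up, down, alpha, mid)                      -> LoRA / LoCon
#   7 entries: (w1_a, w1_b, alpha, w2_a, w2_b, t1, t2)     -> LoHA
#   8 entries: (w1, w2, alpha, w1_a, w1_b, w2_a, w2_b, t2) -> LoKr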
def load_lora(lora, to_load):
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        ######## LoHA (Hadamard product low-rank adaptation)
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## LoKr (Kronecker product low-rank adaptation)
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys_clip(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()

    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    clip_l_present = False
    for b in range(32):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k

            k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                key_map[lora_key] = k
                clip_l_present = True

            k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                if clip_l_present:
                    lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                else:
                    lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                key_map[lora_key] = k

    return key_map
def model_lora_keys_unet(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()
    for k in sdk:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = k

    diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
    for k in diffusers_keys:
        if k.endswith(".weight"):
            key_lora = k[:-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = "diffusion_model.{}".format(diffusers_keys[k])
    return key_map

class ModelPatcher:
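    """Tracks weight patches (LoRA and friends) for a wrapped model.

    patch_model() backs up every weight it touches to the offload device and
    rewrites it through calculate_weight() in fp32; unpatch_model() restores
    the saved tensors.
    """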
    def __init__(self, model, load_device, offload_device, size=0):
        self.size = size
        self.model = model
        self.patches = {}
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()
        self.load_device = load_device
        self.offload_device = offload_device

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size)
        n.patches = {}
        for k in self.patches:
            n.patches[k] = self.patches[k][:]

        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_unet_function_wrapper(self, unet_wrapper_function):
        self.model_options["model_function_wrapper"] = unet_wrapper_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")
    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        if hasattr(self.model, "get_dtype"):
            return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = set()
        for k in patches:
            if k in self.model_keys:
                p.add(k)
                current_patches = self.patches.get(k, [])
                current_patches.append((strength_patch, patches[k], strength_model))
                self.patches[k] = current_patches

        return list(p)

    def get_key_patches(self, filter_prefix=None):
        model_sd = self.model_state_dict()
        p = {}
        for k in model_sd:
            if filter_prefix is not None:
                if not k.startswith(filter_prefix):
                    continue
            if k in self.patches:
                p[k] = [model_sd[k]] + self.patches[k]
            else:
                p[k] = (model_sd[k],)
        return p

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self):
        model_sd = self.model_state_dict()
        for key in self.patches:
            if key not in model_sd:
                print("could not patch. key doesn't exist in model:", key)
                continue

            weight = model_sd[key]

            if key not in self.backup:
                self.backup[key] = weight.to(self.offload_device, copy=True)

            temp_weight = weight.to(torch.float32, copy=True)
            weight[:] = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
            del temp_weight
        return self.model
    def calculate_weight(self, patches, weight, key):
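        """Apply every queued patch for `key` to `weight` in place.

        The tuple length selects the patch type: 1 = plain weight diff,
        4 = LoRA/LoCon, 8 = LoKr, anything else = LoHA.
        """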
        for p in patches:
            alpha = p[0]
            v = p[1]
            strength_model = p[2]

            if strength_model != 1.0:
                weight *= strength_model

            if isinstance(v, list):
                v = (self.calculate_weight(v[1:], v[0].clone(), key), )

            if len(v) == 1:
                w1 = v[0]
                if alpha != 0.0:
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
            elif len(v) == 4: #lora/locon
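                # delta_W = alpha * (up @ down); when an explicit .alpha tensor
                # was stored, alpha is rescaled by the rank (mat2.shape[0])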
                mat1 = v[0].float().to(weight.device)
                mat2 = v[1].float().to(weight.device)
                if v[2] is not None:
                    alpha *= v[2] / mat2.shape[0]
                if v[3] is not None:
                    #locon mid weights, hopefully the math is fine because I didn't properly test it
                    mat3 = v[3].float().to(weight.device)
                    final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]]
                    mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1), mat3.transpose(0, 1).flatten(start_dim=1)).reshape(final_shape).transpose(0, 1)
                weight += (alpha * torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1))).reshape(weight.shape).type(weight.dtype)
            elif len(v) == 8: #lokr
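                # delta_W = alpha * kron(w1, w2); either Kronecker factor may
                # itself arrive in low-rank (a @ b) or CP-decomposed form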
                w1 = v[0]
                w2 = v[1]
                w1_a = v[3]
                w1_b = v[4]
                w2_a = v[5]
                w2_b = v[6]
                t2 = v[7]
                dim = None

                if w1 is None:
                    dim = w1_b.shape[0]
                    w1 = torch.mm(w1_a.float(), w1_b.float())
                else:
                    w1 = w1.float().to(weight.device)

                if w2 is None:
                    dim = w2_b.shape[0]
                    if t2 is None:
                        w2 = torch.mm(w2_a.float().to(weight.device), w2_b.float().to(weight.device))
                    else:
                        w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2_b.float().to(weight.device), w2_a.float().to(weight.device))
                else:
                    w2 = w2.float().to(weight.device)

                if len(w2.shape) == 4:
                    w1 = w1.unsqueeze(2).unsqueeze(2)
                if v[2] is not None and dim is not None:
                    alpha *= v[2] / dim

                weight += alpha * torch.kron(w1, w2).reshape(weight.shape).type(weight.dtype)
            else: #loha
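                # delta_W = alpha * (w1a @ w1b) * (w2a @ w2b): an elementwise
                # (Hadamard) product of two low-rank reconstructions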
                w1a = v[0]
                w1b = v[1]
                if v[2] is not None:
                    alpha *= v[2] / w1b.shape[0]
                w2a = v[3]
                w2b = v[4]
                if v[5] is not None: #cp decomposition
                    t1 = v[5]
                    t2 = v[6]
                    m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float().to(weight.device), w1b.float().to(weight.device), w1a.float().to(weight.device))
                    m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2b.float().to(weight.device), w2a.float().to(weight.device))
                else:
                    m1 = torch.mm(w1a.float().to(weight.device), w1b.float().to(weight.device))
                    m2 = torch.mm(w2a.float().to(weight.device), w2b.float().to(weight.device))

                weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype)
        return weight

    def unpatch_model(self):
        keys = list(self.backup.keys())

        def set_attr(obj, attr, value):
            attrs = attr.split(".")
            for name in attrs[:-1]:
                obj = getattr(obj, name)
            prev = getattr(obj, attrs[-1])
            setattr(obj, attrs[-1], torch.nn.Parameter(value))
            del prev

        for k in keys:
            set_attr(self.model, k, self.backup[k])

        self.backup = {}

def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = model_lora_keys_unet(model.model)
    key_map = model_lora_keys_clip(clip.cond_stage_model, key_map)
    loaded = load_lora(lora, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)
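
# Example usage (a sketch; the lora path is hypothetical):
#   lora_sd = utils.load_torch_file("loras/style.safetensors", safe_load=True)
#   model_patched, clip_patched = load_lora_for_models(model, clip, lora_sd, 1.0, 1.0)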


class CLIP:
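    """Wraps a text encoder and its tokenizer together with a ModelPatcher
    that manages the encoder's weights and device placement."""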
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params.copy()
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        params['device'] = load_device
        self.cond_stage_model = clip(**(params))
        #TODO: make sure this doesn't have a quality loss before enabling.
        # if model_management.should_use_fp16(load_device):
        #     self.cond_stage_model.half()

        self.cond_stage_model = self.cond_stage_model.to()

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        return self.patcher.add_patches(patches, strength_patch, strength_model)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        else:
            self.cond_stage_model.reset_clip_layer()

        model_management.load_model_gpu(self.patcher)
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def patch_model(self):
        self.patcher.patch_model()

    def unpatch_model(self):
        self.patcher.unpatch_model()

    def get_key_patches(self):
        return self.patcher.get_key_patches()

class VAE:
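    """AutoencoderKL wrapper that shuttles the model between its compute and
    offload devices and falls back to tiled decoding/encoding on OOM."""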
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        self.offload_device = model_management.vae_offload_device()
        self.vae_dtype = model_management.vae_dtype()
        self.first_stage_model.to(self.vae_dtype)

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
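            # rough empirical per-latent memory cost of the decoder (see the
            # NOTE on the matching constant in encode() below)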
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def get_sd(self):
        return self.first_stage_model.state_dict()


def broadcast_image_to(tensor, target_batch_size, batched_number):
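    # Tile or trim `tensor` so its batch size lines up with target_batch_size,
    # accounting for cond/uncond duplication via batched_number; a batch of
    # one is returned unchanged since it broadcasts downstream.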
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

class ControlNet:
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out
    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error: checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net
    use_fp16 = model_management.should_use_fp16()

    controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)
    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
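        # the adapter yields one feature map per resolution level while the
        # UNet has several input blocks per level, so pad with None entries to
        # line the indices up with control_prev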
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
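    # the adapters pixel-unshuffle their input by a factor of 8, so conv_in
    # sees 64x the image channel count; cin // 64 recovers the original channels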
    return T2IAdapter(model_ad, cin // 64)

class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    model_type = model_base.ModelType.EPS

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            model_type = model_base.ModelType.V_PREDICTION
    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)
    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, model_type=model_type)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
    else:
        model = model_base.BaseModel(model_config, model_type=model_type)
    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)

def calculate_parameters(sd, prefix):
    params = 0
    for k in sd.keys():
        if k.startswith(prefix):
            params += sd[k].nelement()
    return params
def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None
    parameters = calculate_parameters(sd, "model.diffusion_model.")
    fp16 = model_management.should_use_fp16(model_params=parameters)
    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)
    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd, "model.diffusion_model.")
    model = model.to(offload_device)
    model.load_model_weights(sd, "model.diffusion_model.")
    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)
    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)
    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)
    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae, clipvision)
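
# Example usage (a sketch; the checkpoint path is hypothetical):
#   model, clip, vae, _ = load_checkpoint_guess_config(
#       "checkpoints/v1-5.safetensors", embedding_directory="embeddings")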

def load_unet(unet_path): #load unet in diffusers format
    sd = utils.load_torch_file(unet_path)
    parameters = calculate_parameters(sd, "")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    match = {}
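    # probe a few tensor shapes to identify the UNet; they are compared
    # against the known SD1.5 / SD2.x / SDXL configs defined below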
    match["context_dim"] = sd["down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight"].shape[1]
    match["model_channels"] = sd["conv_in.weight"].shape[0]
    match["in_channels"] = sd["conv_in.weight"].shape[1]
    match["adm_in_channels"] = None
    if "class_embedding.linear_1.weight" in sd:
        match["adm_in_channels"] = sd["class_embedding.linear_1.weight"].shape[1]

    SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
            'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': fp16, 'in_channels': 4, 'model_channels': 320,
            'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4],
            'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048}

    SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
                    'num_classes': 'sequential', 'adm_in_channels': 2560, 'use_fp16': fp16, 'in_channels': 4, 'model_channels': 384,
                    'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 4, 4, 0], 'channel_mult': [1, 2, 4, 4],
                    'transformer_depth_middle': 4, 'use_linear_in_transformer': True, 'context_dim': 1280}

    SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
            'adm_in_channels': None, 'use_fp16': fp16, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2,
            'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
            'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}

    SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
                    'num_classes': 'sequential', 'adm_in_channels': 2048, 'use_fp16': True, 'in_channels': 4, 'model_channels': 320,
                    'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
                    'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}

    SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
                    'num_classes': 'sequential', 'adm_in_channels': 1536, 'use_fp16': True, 'in_channels': 4, 'model_channels': 320,
                    'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
                    'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}

    SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
            'adm_in_channels': None, 'use_fp16': True, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2,
            'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
            'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768}

    supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl]
    print("match", match)
    for unet_config in supported_models:
        matches = True
        for k in match:
            if match[k] != unet_config[k]:
                matches = False
                break
        if matches:
            diffusers_keys = utils.unet_to_diffusers(unet_config)
            new_sd = {}
            for k in diffusers_keys:
                if k in sd:
                    new_sd[diffusers_keys[k]] = sd.pop(k)
                else:
                    print(diffusers_keys[k], k)
            offload_device = model_management.unet_offload_device()
            model_config = model_detection.model_config_from_unet_config(unet_config)
            model = model_config.get_model(new_sd, "")
            model = model.to(offload_device)
            model.load_model_weights(new_sd, "")
            return ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device)

def save_checkpoint(output_path, model, clip, vae, metadata=None):
    try:
        model.patch_model()
        clip.patch_model()
        sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
        utils.save_torch_file(sd, output_path, metadata=metadata)
    finally:
        model.unpatch_model()
        clip.unpatch_model()