import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

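# Load a state dict into a model without strict key matching; tensors the model consumed are
# popped from `sd` to free memory and any missing keys are printed.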
def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}


def load_lora(lora, to_load):
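    # Gather LoRA/LoCon (lora_up/lora_down, optional lora_mid), LoHa (hada_*) and LoKr (lokr_*)
    # tensors for every entry in to_load and return a patch dict keyed by model weight name;
    # keys in `lora` that are never consumed get reported as "lora key not loaded".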
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

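# Build a mapping from LoRA text-encoder key names (lora_te*/lora_te1*/lora_te2*) to the keys
# of this CLIP model's state dict, covering SD1.x/SD2.x and SDXL checkpoints.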
def model_lora_keys_clip(model, key_map={}):
    sdk = model.state_dict().keys()

    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    clip_l_present = False
    for b in range(32):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k

            k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                key_map[lora_key] = k
                clip_l_present = True

            k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                if clip_l_present:
                    lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                else:
                    lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                key_map[lora_key] = k

    return key_map

def model_lora_keys_unet(model, key_map={}):
    sdk = model.state_dict().keys()

    for k in sdk:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = k

    diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
    for k in diffusers_keys:
        if k.endswith(".weight"):
            key_lora = k[:-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = "diffusion_model.{}".format(diffusers_keys[k])
    return key_map

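# Replace a dotted attribute path on `obj` (e.g. "diffusion_model.out.0.weight") with a new
# torch.nn.Parameter wrapping `value`; used below when applying and reverting weight patches.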
def set_attr(obj, attr, value):
    attrs = attr.split(".")
    for name in attrs[:-1]:
        obj = getattr(obj, name)
    prev = getattr(obj, attrs[-1])
    setattr(obj, attrs[-1], torch.nn.Parameter(value))
    del prev

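# ModelPatcher wraps a model together with pending weight patches (LoRA and friends) and
# model/sampler options; patch_model() applies the patches and unpatch_model() restores backups.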
class ModelPatcher:
    def __init__(self, model, load_device, offload_device, size=0):
        self.size = size
        self.model = model
        self.patches = {}
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()
        self.load_device = load_device
        self.offload_device = offload_device

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size)
        n.patches = {}
        for k in self.patches:
            n.patches[k] = self.patches[k][:]

        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_unet_function_wrapper(self, unet_wrapper_function):
        self.model_options["model_function_wrapper"] = unet_wrapper_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        if hasattr(self.model, "get_dtype"):
            return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = set()
        for k in patches:
            if k in self.model_keys:
                p.add(k)
                current_patches = self.patches.get(k, [])
                current_patches.append((strength_patch, patches[k], strength_model))
                self.patches[k] = current_patches

        return list(p)

    def get_key_patches(self, filter_prefix=None):
        model_sd = self.model_state_dict()
        p = {}
        for k in model_sd:
            if filter_prefix is not None:
                if not k.startswith(filter_prefix):
                    continue
            if k in self.patches:
                p[k] = [model_sd[k]] + self.patches[k]
            else:
                p[k] = (model_sd[k],)
        return p

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self, device_to=None):
        model_sd = self.model_state_dict()
        for key in self.patches:
            if key not in model_sd:
                print("could not patch. key doesn't exist in model:", key)
                continue

            weight = model_sd[key]

            if key not in self.backup:
                self.backup[key] = weight.to(self.offload_device)

            if device_to is not None:
                temp_weight = weight.float().to(device_to, copy=True)
            else:
                temp_weight = weight.to(torch.float32, copy=True)
            out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
            set_attr(self.model, key, out_weight)
            del temp_weight
        return self.model

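    # Merge every patch recorded for `key` into `weight`; the tuple length selects the format
    # (1 = plain diff, 4 = lora/locon, 8 = lokr, otherwise loha) and alpha scales each patch.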
    def calculate_weight(self, patches, weight, key):
        for p in patches:
            alpha = p[0]
            v = p[1]
            strength_model = p[2]

            if strength_model != 1.0:
                weight *= strength_model

            if isinstance(v, list):
                v = (self.calculate_weight(v[1:], v[0].clone(), key), )

            if len(v) == 1:
                w1 = v[0]
                if alpha != 0.0:
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
            elif len(v) == 4: #lora/locon
                mat1 = v[0].float().to(weight.device)
                mat2 = v[1].float().to(weight.device)
                if v[2] is not None:
                    alpha *= v[2] / mat2.shape[0]
                if v[3] is not None:
                    #locon mid weights, hopefully the math is fine because I didn't properly test it
                    mat3 = v[3].float().to(weight.device)
                    final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]]
                    mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1), mat3.transpose(0, 1).flatten(start_dim=1)).reshape(final_shape).transpose(0, 1)
                try:
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1))).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)
            elif len(v) == 8: #lokr
                w1 = v[0]
                w2 = v[1]
                w1_a = v[3]
                w1_b = v[4]
                w2_a = v[5]
                w2_b = v[6]
                t2 = v[7]
                dim = None

                if w1 is None:
                    dim = w1_b.shape[0]
                    w1 = torch.mm(w1_a.float(), w1_b.float())
                else:
                    w1 = w1.float().to(weight.device)

                if w2 is None:
                    dim = w2_b.shape[0]
                    if t2 is None:
                        w2 = torch.mm(w2_a.float().to(weight.device), w2_b.float().to(weight.device))
                    else:
                        w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2_b.float().to(weight.device), w2_a.float().to(weight.device))
                else:
                    w2 = w2.float().to(weight.device)

                if len(w2.shape) == 4:
                    w1 = w1.unsqueeze(2).unsqueeze(2)
                if v[2] is not None and dim is not None:
                    alpha *= v[2] / dim

                try:
                    weight += alpha * torch.kron(w1, w2).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)
            else: #loha
                w1a = v[0]
                w1b = v[1]
                if v[2] is not None:
                    alpha *= v[2] / w1b.shape[0]
                w2a = v[3]
                w2b = v[4]
                if v[5] is not None: #cp decomposition
                    t1 = v[5]
                    t2 = v[6]
                    m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float().to(weight.device), w1b.float().to(weight.device), w1a.float().to(weight.device))
                    m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2b.float().to(weight.device), w2a.float().to(weight.device))
                else:
                    m1 = torch.mm(w1a.float().to(weight.device), w1b.float().to(weight.device))
                    m2 = torch.mm(w2a.float().to(weight.device), w2b.float().to(weight.device))

                try:
                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)

        return weight
    def unpatch_model(self):
        keys = list(self.backup.keys())

        for k in keys:
            set_attr(self.model, k, self.backup[k])

        self.backup = {}

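# Apply a LoRA state dict to a model/CLIP pair: map its keys, clone both patchers and add the
# patches at the requested strengths; keys that match neither model are printed as "NOT LOADED".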
def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = model_lora_keys_unet(model.model)
    key_map = model_lora_keys_clip(clip.cond_stage_model, key_map)
    loaded = load_lora(lora, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


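# CLIP wraps the text encoder and tokenizer behind a ModelPatcher so prompts can be tokenized
# and encoded, optionally from a specific hidden layer selected via clip_layer().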
class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params.copy()
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        params['device'] = load_device
        self.cond_stage_model = clip(**(params))
        #TODO: make sure this doesn't have a quality loss before enabling.
        # if model_management.should_use_fp16(load_device):
        #     self.cond_stage_model.half()

        self.cond_stage_model = self.cond_stage_model.to()

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        return self.patcher.add_patches(patches, strength_patch, strength_model)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        else:
            self.cond_stage_model.reset_clip_layer()

        model_management.load_model_gpu(self.patcher)
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def patch_model(self):
        self.patcher.patch_model()

    def unpatch_model(self):
        self.patcher.unpatch_model()

    def get_key_patches(self):
        return self.patcher.get_key_patches()

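# VAE wraps the first-stage autoencoder; decode()/encode() batch by available memory and fall
# back to the tiled variants when they run out of memory.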
class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        self.offload_device = model_management.vae_offload_device()
        self.vae_dtype = model_management.vae_dtype()
        self.first_stage_model.to(self.vae_dtype)

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

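    # Decode latents to images; batch size is estimated from free memory (the constants are
    # empirical) and tiled decoding is used as the out-of-memory fallback.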
    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def get_sd(self):
        return self.first_stage_model.state_dict()


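# Repeat or trim a conditioning image tensor so its batch size matches the latent batch,
# accounting for the batch being duplicated `batched_number` times (e.g. cond/uncond).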
def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

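# ControlBase holds the state shared by ControlNet and T2IAdapter: the conditioning hint and its
# strength, the timestep range in which the control is active, and an optional chained controlnet.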
class ControlBase:
    def __init__(self, device=None):
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        self.timestep_percent_range = (1.0, 0.0)
        self.timestep_range = None

        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None

    def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)):
        self.cond_hint_original = cond_hint
        self.strength = strength
        self.timestep_percent_range = timestep_percent_range
        return self

    def pre_run(self, model, percent_to_timestep_function):
        self.timestep_range = (percent_to_timestep_function(self.timestep_percent_range[0]), percent_to_timestep_function(self.timestep_percent_range[1]))
        if self.previous_controlnet is not None:
            self.previous_controlnet.pre_run(model, percent_to_timestep_function)

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None
        self.timestep_range = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

    def copy_to(self, c):
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        c.timestep_percent_range = self.timestep_percent_range

class ControlNet(ControlBase):
    def __init__(self, control_model, global_average_pooling=False, device=None):
        super().__init__(device)
        self.control_model = control_model
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.timestep_range is not None:
            if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
                if control_prev is not None:
                    return control_prev
                else:
                    return {}

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        self.copy_to(c)
        return c

    def get_models(self):
        out = super().get_models()
        out.append(self.control_model)
        return out


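# Load a ControlNet checkpoint (diffusers or pth/safetensors layout, including "difference"
# controlnets that need the base model added back in) and wrap it in a ControlNet object.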
def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)

    controlnet_config = None
    if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format
        use_fp16 = model_management.should_use_fp16()
        controlnet_config = model_detection.model_config_from_diffusers_unet(controlnet_data, use_fp16).unet_config
        diffusers_keys = utils.unet_to_diffusers(controlnet_config)
        diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
        diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"

        count = 0
        loop = True
        while loop:
            suffix = [".weight", ".bias"]
            for s in suffix:
                k_in = "controlnet_down_blocks.{}{}".format(count, s)
                k_out = "zero_convs.{}.0{}".format(count, s)
                if k_in not in controlnet_data:
                    loop = False
                    break
                diffusers_keys[k_in] = k_out
            count += 1

        count = 0
        loop = True
        while loop:
            suffix = [".weight", ".bias"]
            for s in suffix:
                if count == 0:
                    k_in = "controlnet_cond_embedding.conv_in{}".format(s)
                else:
                    k_in = "controlnet_cond_embedding.blocks.{}{}".format(count - 1, s)
                k_out = "input_hint_block.{}{}".format(count * 2, s)
                if k_in not in controlnet_data:
                    k_in = "controlnet_cond_embedding.conv_out{}".format(s)
                    loop = False
                diffusers_keys[k_in] = k_out
            count += 1

        new_sd = {}
        for k in diffusers_keys:
            if k in controlnet_data:
                new_sd[diffusers_keys[k]] = controlnet_data.pop(k)

        controlnet_data = new_sd

    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    if controlnet_config is None:
        use_fp16 = model_management.should_use_fp16()
        controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

class T2IAdapter(ControlBase):
    def __init__(self, t2i_model, channels_in, device=None):
        super().__init__(device)
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.control_input = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.timestep_range is not None:
            if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
                if control_prev is not None:
                    return control_prev
                else:
                    return {}

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        self.copy_to(c)
        return c


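# Build an Adapter or Adapter_light model from a T2I-Adapter state dict, inferring the input
# channel count (and kernel size) from the weights.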
def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


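# Load one or more text encoder checkpoints and pick the matching CLIP implementation
# (SD1.x, SD2.x, SDXL or SDXL-Refiner) based on which layers are present.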
def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    model_type = model_base.ModelType.EPS

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            model_type = model_base.ModelType.V_PREDICTION

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, model_type=model_type)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
    else:
        model = model_base.BaseModel(model_config, model_type=model_type)

    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)

def calculate_parameters(sd, prefix):
    params = 0
    for k in sd.keys():
        if k.startswith(prefix):
            params += sd[k].nelement()
    return params

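# Load a full checkpoint, detect the model type from the UNet weights and return
# (ModelPatcher, CLIP, VAE, clip vision model or None).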
def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    parameters = calculate_parameters(sd, "model.diffusion_model.")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd, "model.diffusion_model.", device=offload_device)
    model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae, clipvision)

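# Load a standalone UNet saved in the diffusers key layout and remap it to the internal format.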
def load_unet(unet_path): #load unet in diffusers format
    sd = utils.load_torch_file(unet_path)
    parameters = calculate_parameters(sd, "")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    model_config = model_detection.model_config_from_diffusers_unet(sd, fp16)
    if model_config is None:
        print("ERROR UNSUPPORTED UNET", unet_path)
        return None

    diffusers_keys = utils.unet_to_diffusers(model_config.unet_config)

    new_sd = {}
    for k in diffusers_keys:
        if k in sd:
            new_sd[diffusers_keys[k]] = sd.pop(k)
        else:
            print(diffusers_keys[k], k)
    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(new_sd, "")
    model = model.to(offload_device)
    model.load_model_weights(new_sd, "")
    return ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device)

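# Patch the model and CLIP weights, save a combined checkpoint (UNet + CLIP + VAE) with optional
# metadata, and make sure both are unpatched again even if saving fails.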
def save_checkpoint(output_path, model, clip, vae, metadata=None):
    try:
        model.patch_model()
        clip.patch_model()
        sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
        utils.save_torch_file(sd, output_path, metadata=metadata)
        model.unpatch_model()
        clip.unpatch_model()
    except Exception as e:
        model.unpatch_model()
        clip.unpatch_model()
        raise e