import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}


def load_lora(lora, to_load):
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        regular_lora = "{}.lora_up.weight".format(x)
        diffusers_lora = "{}_lora.up.weight".format(x)
        transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
        A_name = None

        if regular_lora in lora.keys():
            A_name = regular_lora
            B_name = "{}.lora_down.weight".format(x)
            mid_name = "{}.lora_mid.weight".format(x)
        elif diffusers_lora in lora.keys():
            A_name = diffusers_lora
            B_name = "{}_lora.down.weight".format(x)
            mid_name = None
        elif transformers_lora in lora.keys():
            A_name = transformers_lora
            B_name = "{}.lora_linear_layer.down.weight".format(x)
            mid_name = None

        if A_name is not None:
            mid = None
            if mid_name is not None and mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)


        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)

        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict
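# load_lora() understands several LoRA key layouts: the kohya-style
# "lora_up"/"lora_down" pair, the diffusers "_lora.up"/"_lora.down" pair, the
# transformers "lora_linear_layer.up"/"lora_linear_layer.down" pair, plus the
# LoHa ("hada_*") and LoKr ("lokr_*") decompositions. Anything it cannot match
# is reported above as "lora key not loaded" instead of failing silently.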

def model_lora_keys_clip(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()

    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    clip_l_present = False
    for b in range(32):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k
                lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                key_map[lora_key] = k

            k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                key_map[lora_key] = k
                clip_l_present = True
                lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                key_map[lora_key] = k

            k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                if clip_l_present:
                    lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                    key_map[lora_key] = k
                    lora_key = "text_encoder_2.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                    key_map[lora_key] = k
                else:
                    lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                    key_map[lora_key] = k
                    lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                    key_map[lora_key] = k

    return key_map

def model_lora_keys_unet(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()

    for k in sdk:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = k

    diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
    for k in diffusers_keys:
        if k.endswith(".weight"):
            unet_key = "diffusion_model.{}".format(diffusers_keys[k])
            key_lora = k[:-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = unet_key

            diffusers_lora_prefix = ["", "unet."]
            for p in diffusers_lora_prefix:
                diffusers_lora_key = "{}{}".format(p, k[:-len(".weight")].replace(".to_", ".processor.to_"))
                if diffusers_lora_key.endswith(".to_out.0"):
                    diffusers_lora_key = diffusers_lora_key[:-2]
                key_map[diffusers_lora_key] = unet_key

    return key_map

def set_attr(obj, attr, value):
    attrs = attr.split(".")
    for name in attrs[:-1]:
        obj = getattr(obj, name)
    prev = getattr(obj, attrs[-1])
    setattr(obj, attrs[-1], torch.nn.Parameter(value))
    del prev
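# set_attr() walks a dotted attribute path and swaps the leaf tensor for a new
# torch.nn.Parameter, e.g. (key shown for illustration only):
#   set_attr(model, "diffusion_model.out.2.weight", patched_tensor)
# patch_model()/unpatch_model() below rely on this to install and restore weights.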

class ModelPatcher:
    def __init__(self, model, load_device, offload_device, size=0):
        self.size = size
        self.model = model
        self.patches = {}
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()
        self.load_device = load_device
        self.offload_device = offload_device

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size)
        n.patches = {}
        for k in self.patches:
            n.patches[k] = self.patches[k][:]

        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_unet_function_wrapper(self, unet_wrapper_function):
        self.model_options["model_function_wrapper"] = unet_wrapper_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        if hasattr(self.model, "get_dtype"):
            return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = set()
        for k in patches:
            if k in self.model_keys:
                p.add(k)
                current_patches = self.patches.get(k, [])
                current_patches.append((strength_patch, patches[k], strength_model))
                self.patches[k] = current_patches

        return list(p)
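    # Every entry appended above is a (strength_patch, patch, strength_model)
    # tuple; calculate_weight() later applies them in insertion order, so the
    # same key can accumulate several LoRA/diff patches from different loaders.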

    def get_key_patches(self, filter_prefix=None):
        model_sd = self.model_state_dict()
        p = {}
        for k in model_sd:
            if filter_prefix is not None:
                if not k.startswith(filter_prefix):
                    continue
            if k in self.patches:
                p[k] = [model_sd[k]] + self.patches[k]
            else:
                p[k] = (model_sd[k],)
        return p

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self, device_to=None):
        model_sd = self.model_state_dict()
        for key in self.patches:
            if key not in model_sd:
                print("could not patch. key doesn't exist in model:", key)
                continue

            weight = model_sd[key]

            if key not in self.backup:
                self.backup[key] = weight.to(self.offload_device)

            if device_to is not None:
                temp_weight = weight.float().to(device_to, copy=True)
            else:
                temp_weight = weight.to(torch.float32, copy=True)
            out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
            set_attr(self.model, key, out_weight)
            del temp_weight
        return self.model
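    # calculate_weight() below folds every recorded patch into one weight tensor:
    #   plain diff (len 1):   W += alpha * W_diff
    #   lora/locon (len 4):   W += alpha * (up @ down), alpha scaled by lora_alpha / rank
    #   lokr (len 8):         W += alpha * kron(w1, w2)
    #   loha (otherwise):     W += alpha * (w1a @ w1b) * (w2a @ w2b)
    # with the optional mid/t1/t2 tensors handled through the einsum branches.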

    def calculate_weight(self, patches, weight, key):
        for p in patches:
            alpha = p[0]
            v = p[1]
            strength_model = p[2]

            if strength_model != 1.0:
                weight *= strength_model

            if isinstance(v, list):
                v = (self.calculate_weight(v[1:], v[0].clone(), key), )

            if len(v) == 1:
                w1 = v[0]
                if alpha != 0.0:
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
            elif len(v) == 4: #lora/locon
                mat1 = v[0].float().to(weight.device)
                mat2 = v[1].float().to(weight.device)
                if v[2] is not None:
                    alpha *= v[2] / mat2.shape[0]
                if v[3] is not None:
                    #locon mid weights, hopefully the math is fine because I didn't properly test it
                    mat3 = v[3].float().to(weight.device)
                    final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]]
                    mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1), mat3.transpose(0, 1).flatten(start_dim=1)).reshape(final_shape).transpose(0, 1)
                try:
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1))).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)
            elif len(v) == 8: #lokr
                w1 = v[0]
                w2 = v[1]
                w1_a = v[3]
                w1_b = v[4]
                w2_a = v[5]
                w2_b = v[6]
                t2 = v[7]
                dim = None

                if w1 is None:
                    dim = w1_b.shape[0]
                    w1 = torch.mm(w1_a.float(), w1_b.float())
                else:
                    w1 = w1.float().to(weight.device)

                if w2 is None:
                    dim = w2_b.shape[0]
                    if t2 is None:
                        w2 = torch.mm(w2_a.float().to(weight.device), w2_b.float().to(weight.device))
                    else:
                        w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2_b.float().to(weight.device), w2_a.float().to(weight.device))
                else:
                    w2 = w2.float().to(weight.device)

                if len(w2.shape) == 4:
                    w1 = w1.unsqueeze(2).unsqueeze(2)
                if v[2] is not None and dim is not None:
                    alpha *= v[2] / dim

                try:
                    weight += alpha * torch.kron(w1, w2).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)
            else: #loha
                w1a = v[0]
                w1b = v[1]
                if v[2] is not None:
                    alpha *= v[2] / w1b.shape[0]
                w2a = v[3]
                w2b = v[4]
                if v[5] is not None: #cp decomposition
                    t1 = v[5]
                    t2 = v[6]
                    m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float().to(weight.device), w1b.float().to(weight.device), w1a.float().to(weight.device))
                    m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2b.float().to(weight.device), w2a.float().to(weight.device))
                else:
                    m1 = torch.mm(w1a.float().to(weight.device), w1b.float().to(weight.device))
                    m2 = torch.mm(w2a.float().to(weight.device), w2b.float().to(weight.device))

                try:
                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)

        return weight

    def unpatch_model(self):
        keys = list(self.backup.keys())

        for k in keys:
            set_attr(self.model, k, self.backup[k])

        self.backup = {}

def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = model_lora_keys_unet(model.model)
    key_map = model_lora_keys_clip(clip.cond_stage_model, key_map)
    loaded = load_lora(lora, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)
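# Typical call site (sketch, file name illustrative): given a ModelPatcher and a
# CLIP object from one of the checkpoint loaders below:
#   lora_sd = utils.load_torch_file("some_lora.safetensors", safe_load=True)
#   model, clip = load_lora_for_models(model, clip, lora_sd, 1.0, 1.0)
# Both returned objects are clones, so the unpatched originals stay usable.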


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params.copy()
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        params['device'] = load_device
        self.cond_stage_model = clip(**(params))
        #TODO: make sure this doesn't have a quality loss before enabling.
        # if model_management.should_use_fp16(load_device):
        #     self.cond_stage_model.half()

        self.cond_stage_model = self.cond_stage_model.to()

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        return self.patcher.add_patches(patches, strength_patch, strength_model)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        else:
            self.cond_stage_model.reset_clip_layer()

        model_management.load_model_gpu(self.patcher)
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)
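    # Minimal usage sketch (prompt text is illustrative):
    #   tokens = clip.tokenize("a photo of a cat")
    #   cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
    # encode() above is the shorthand that tokenizes and encodes in one call.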

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def patch_model(self):
        self.patcher.patch_model()

    def unpatch_model(self):
        self.patcher.unpatch_model()

    def get_key_patches(self):
        return self.patcher.get_key_patches()

class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        self.offload_device = model_management.vae_offload_device()
        self.vae_dtype = model_management.vae_dtype()
        self.first_stage_model.to(self.vae_dtype)

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output
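    # decode_tiled_ runs the decoder over square, wide and tall tilings and
    # averages the three results, which helps hide the seams that a single
    # tiling pass would otherwise leave at tile borders.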

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def get_sd(self):
        return self.first_stage_model.state_dict()


def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)
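# broadcast_image_to() lines a conditioning image batch up with the latent batch:
# a single hint is reused as-is, otherwise it is trimmed/repeated per batch and
# then duplicated across the batched_number cond/uncond passes.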

class ControlBase:
    def __init__(self, device=None):
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        self.timestep_percent_range = (1.0, 0.0)
        self.timestep_range = None

        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None

    def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)):
        self.cond_hint_original = cond_hint
        self.strength = strength
        self.timestep_percent_range = timestep_percent_range
        return self

    def pre_run(self, model, percent_to_timestep_function):
        self.timestep_range = (percent_to_timestep_function(self.timestep_percent_range[0]), percent_to_timestep_function(self.timestep_percent_range[1]))
        if self.previous_controlnet is not None:
            self.previous_controlnet.pre_run(model, percent_to_timestep_function)

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None
        self.timestep_range = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

    def copy_to(self, c):
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        c.timestep_percent_range = self.timestep_percent_range
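# ControlNet and T2IAdapter below can be chained through previous_controlnet;
# each get_control() call merges its own residuals with whatever the previous
# link in the chain produced, and timestep_percent_range limits when a hint is active.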

class ControlNet(ControlBase):
    def __init__(self, control_model, global_average_pooling=False, device=None):
        super().__init__(device)
        self.control_model = control_model
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.timestep_range is not None:
            if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
                if control_prev is not None:
                    return control_prev
                else:
                    return {}

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        self.copy_to(c)
        return c

    def get_models(self):
        out = super().get_models()
        out.append(self.control_model)
        return out


def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)

    controlnet_config = None
    if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format
        use_fp16 = model_management.should_use_fp16()
        controlnet_config = model_detection.unet_config_from_diffusers_unet(controlnet_data, use_fp16)
        diffusers_keys = utils.unet_to_diffusers(controlnet_config)
        diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
        diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"

        count = 0
        loop = True
        while loop:
            suffix = [".weight", ".bias"]
            for s in suffix:
                k_in = "controlnet_down_blocks.{}{}".format(count, s)
                k_out = "zero_convs.{}.0{}".format(count, s)
                if k_in not in controlnet_data:
                    loop = False
                    break
                diffusers_keys[k_in] = k_out
            count += 1

        count = 0
        loop = True
        while loop:
            suffix = [".weight", ".bias"]
            for s in suffix:
                if count == 0:
                    k_in = "controlnet_cond_embedding.conv_in{}".format(s)
                else:
                    k_in = "controlnet_cond_embedding.blocks.{}{}".format(count - 1, s)
                k_out = "input_hint_block.{}{}".format(count * 2, s)
                if k_in not in controlnet_data:
                    k_in = "controlnet_cond_embedding.conv_out{}".format(s)
                    loop = False
                diffusers_keys[k_in] = k_out
            count += 1

        new_sd = {}
        for k in diffusers_keys:
            if k in controlnet_data:
                new_sd[diffusers_keys[k]] = controlnet_data.pop(k)

        leftover_keys = controlnet_data.keys()
        if len(leftover_keys) > 0:
            print("leftover keys:", leftover_keys)
        controlnet_data = new_sd

    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    if controlnet_config is None:
        use_fp16 = model_management.should_use_fp16()
        controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1]
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

class T2IAdapter(ControlBase):
    def __init__(self, t2i_model, channels_in, device=None):
        super().__init__(device)
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.control_input = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.timestep_range is not None:
            if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
                if control_prev is not None:
                    return control_prev
                else:
                    return {}

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        self.copy_to(c)
        return c


def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    model_type = model_base.ModelType.EPS

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            model_type = model_base.ModelType.V_PREDICTION

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, model_type=model_type)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
    else:
        model = model_base.BaseModel(model_config, model_type=model_type)

    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)

def calculate_parameters(sd, prefix):
    params = 0
    for k in sd.keys():
        if k.startswith(prefix):
            params += sd[k].nelement()
    return params

def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    parameters = calculate_parameters(sd, "model.diffusion_model.")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd, "model.diffusion_model.", device=offload_device)
    model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae, clipvision)
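# Sketch of the usual entry point (checkpoint file name illustrative):
#   model_patcher, clip, vae, _ = load_checkpoint_guess_config(
#       "v1-5-pruned-emaonly.safetensors", embedding_directory="embeddings")
# The model type, CLIP flavor and VAE are all detected from the state dict keys.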


def load_unet(unet_path): #load unet in diffusers format
    sd = utils.load_torch_file(unet_path)
    parameters = calculate_parameters(sd, "")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    model_config = model_detection.model_config_from_diffusers_unet(sd, fp16)
    if model_config is None:
        print("ERROR UNSUPPORTED UNET", unet_path)
        return None

    diffusers_keys = utils.unet_to_diffusers(model_config.unet_config)

    new_sd = {}
    for k in diffusers_keys:
        if k in sd:
            new_sd[diffusers_keys[k]] = sd.pop(k)
        else:
            print(diffusers_keys[k], k)
    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(new_sd, "")
    model = model.to(offload_device)
    model.load_model_weights(new_sd, "")
    return ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device)
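# load_unet() above expects a UNet-only state dict in diffusers layout; the keys
# are remapped through utils.unet_to_diffusers() before the usual ModelPatcher
# wrap, so diffusers-format UNet checkpoints can be used directly as the model.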

def save_checkpoint(output_path, model, clip, vae, metadata=None):
    try:
        model.patch_model()
        clip.patch_model()
        sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
        utils.save_torch_file(sd, output_path, metadata=metadata)
        model.unpatch_model()
        clip.unpatch_model()
    except Exception as e:
        model.unpatch_model()
        clip.unpatch_model()
        raise e