"...components/common/ExpandableDetails/ExpandableIndex.tsx" did not exist on "cbb63c5bec618354a25583c0861f45d4a01d9812"
sd.py 48.4 KB
Newer Older
comfyanonymous's avatar
comfyanonymous committed
import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}


def load_lora(lora, to_load):
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)

        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

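# Illustrative sketch (not part of the module): load_lora() only collects and
# renames tensors; the tuple length encodes the patch type that
# ModelPatcher.calculate_weight() later dispatches on (1 = plain diff,
# 4 = lora/locon, 7 = loha, 8 = lokr). Key names below are hypothetical:
#
#   lora = {
#       "lora_unet_foo.lora_up.weight": up,      # shape (out, rank)
#       "lora_unet_foo.lora_down.weight": down,  # shape (rank, in)
#       "lora_unet_foo.alpha": torch.tensor(8.),
#   }
#   load_lora(lora, {"lora_unet_foo": "diffusion_model.foo.weight"})
#   # -> {"diffusion_model.foo.weight": (up, down, 8.0, None)}
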
def model_lora_keys_clip(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()

    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    clip_l_present = False
    for b in range(32):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k

            k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                key_map[lora_key] = k
                clip_l_present = True

            k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                if clip_l_present:
                    lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                else:
                    lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                key_map[lora_key] = k

    return key_map

def model_lora_keys_unet(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()

    for k in sdk:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = k

    diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
    for k in diffusers_keys:
        if k.endswith(".weight"):
            key_lora = k[:-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = "diffusion_model.{}".format(diffusers_keys[k])
    return key_map

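# Name-mapping sketch: both helpers above only translate key names. A UNet
# weight like "diffusion_model.input_blocks.1.1.proj_in.weight" becomes the
# LoRA key "lora_unet_input_blocks_1_1_proj_in" (dots to underscores, ".weight"
# dropped), and diffusers-layout names are routed through
# utils.unet_to_diffusers() so diffusers-trained LoRAs resolve to the same
# internal weights.
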
class ModelPatcher:
    def __init__(self, model, load_device, offload_device, size=0):
        self.size = size
        self.model = model
        self.patches = {}
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()
        self.load_device = load_device
        self.offload_device = offload_device

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size)
        n.patches = {}
        for k in self.patches:
            n.patches[k] = self.patches[k][:]

        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_unet_function_wrapper(self, unet_wrapper_function):
        self.model_options["model_function_wrapper"] = unet_wrapper_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        if hasattr(self.model, "get_dtype"):
            return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = set()
        for k in patches:
            if k in self.model_keys:
                p.add(k)
                current_patches = self.patches.get(k, [])
                current_patches.append((strength_patch, patches[k], strength_model))
                self.patches[k] = current_patches

        return list(p)

    def get_key_patches(self, filter_prefix=None):
        model_sd = self.model_state_dict()
        p = {}
        for k in model_sd:
            if filter_prefix is not None:
                if not k.startswith(filter_prefix):
                    continue
            if k in self.patches:
                p[k] = [model_sd[k]] + self.patches[k]
            else:
                p[k] = (model_sd[k],)
        return p

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self):
        model_sd = self.model_state_dict()
        for key in self.patches:
            if key not in model_sd:
                print("could not patch. key doesn't exist in model:", key)
                continue

            weight = model_sd[key]

            if key not in self.backup:
                self.backup[key] = weight.clone()

            weight[:] = self.calculate_weight(self.patches[key], weight.clone(), key)
        return self.model

    def calculate_weight(self, patches, weight, key):
        for p in patches:
            alpha = p[0]
            v = p[1]
            strength_model = p[2]

            if strength_model != 1.0:
                weight *= strength_model

            if isinstance(v, list):
                v = (self.calculate_weight(v[1:], v[0].clone(), key), )

            if len(v) == 1:
                w1 = v[0]
                if w1.shape != weight.shape:
                    print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                else:
                    weight += alpha * w1.type(weight.dtype).to(weight.device)
            elif len(v) == 4: #lora/locon
                mat1 = v[0]
                mat2 = v[1]
                if v[2] is not None:
                    alpha *= v[2] / mat2.shape[0]
                if v[3] is not None:
                    #locon mid weights, hopefully the math is fine because I didn't properly test it
                    final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                    mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
            elif len(v) == 8: #lokr
                w1 = v[0]
                w2 = v[1]
                w1_a = v[3]
                w1_b = v[4]
                w2_a = v[5]
                w2_b = v[6]
                t2 = v[7]
                dim = None

                if w1 is None:
                    dim = w1_b.shape[0]
                    w1 = torch.mm(w1_a.float(), w1_b.float())

                if w2 is None:
                    dim = w2_b.shape[0]
                    if t2 is None:
                        w2 = torch.mm(w2_a.float(), w2_b.float())
                    else:
                        w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                if len(w2.shape) == 4:
                    w1 = w1.unsqueeze(2).unsqueeze(2)
                if v[2] is not None and dim is not None:
                    alpha *= v[2] / dim

                weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
            else: #loha
                w1a = v[0]
                w1b = v[1]
                if v[2] is not None:
                    alpha *= v[2] / w1b.shape[0]
                w2a = v[3]
                w2b = v[4]
                if v[5] is not None: #cp decomposition
                    t1 = v[5]
                    t2 = v[6]
                    m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                    m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                else:
                    m1 = torch.mm(w1a.float(), w1b.float())
                    m2 = torch.mm(w2a.float(), w2b.float())

                weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return weight
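
# Worked example for the len(v) == 4 (lora/locon) branch above, for a 2D weight:
# with up of shape (out, rank), down of shape (rank, in) and a stored alpha,
#
#   scale = strength_patch * (alpha / rank)   # rank is mat2.shape[0]
#   weight = weight + scale * (up @ down)     # (out, rank) @ (rank, in) -> (out, in)
#
# i.e. a rank-r LoRA adds a rank-r update reconstructed from two small matrices
# instead of a full weight diff.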

    def unpatch_model(self):
        model_sd = self.model_state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

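# Usage sketch (hypothetical tensors): patches are applied to the weights in
# place and the originals saved in self.backup, so patch_model() and
# unpatch_model() must be paired:
#
#   patcher = ModelPatcher(model, load_device, offload_device)
#   patcher.add_patches({"diffusion_model.foo.weight": (diff,)}, strength_patch=0.5)
#   patcher.patch_model()    # weight now includes 0.5 * diff
#   patcher.unpatch_model()  # original weight restored
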
def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = model_lora_keys_unet(model.model)
    key_map = model_lora_keys_clip(clip.cond_stage_model, key_map)
    loaded = load_lora(lora, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)
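
# Usage sketch (hypothetical path): the originals are cloned first, so `model`
# and `clip` keep their unpatched weights:
#
#   lora_sd = utils.load_torch_file("loras/example.safetensors", safe_load=True)
#   model_lora, clip_lora = load_lora_for_models(model, clip, lora_sd, 0.8, 0.8)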


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params.copy()
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        params['device'] = load_device
        self.cond_stage_model = clip(**(params))
        #TODO: make sure this doesn't have a quality loss before enabling.
        # if model_management.should_use_fp16(load_device):
        #     self.cond_stage_model.half()

        self.cond_stage_model = self.cond_stage_model.to()

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)

        model_management.load_model_gpu(self.patcher)
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def patch_model(self):
        self.patcher.patch_model()

    def unpatch_model(self):
        self.patcher.unpatch_model()

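# Encoding sketch for the CLIP wrapper above: tokenize() and
# encode_from_tokens() form the usual two-step path; encode() is the one-call
# convenience wrapper:
#
#   tokens = clip.tokenize("a photo of a cat")
#   cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
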
class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        self.offload_device = model_management.vae_offload_device()
        self.vae_dtype = model_management.vae_dtype()
        self.first_stage_model.to(self.vae_dtype)

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def get_sd(self):
        return self.first_stage_model.state_dict()

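# Shape sketch for the VAE above: images travel as NHWC floats in [0, 1] and
# latents as NCHW with 4 channels at 1/8 resolution, so a 1x512x512x3 image
# maps to a 1x4x64x64 latent:
#
#   latent = vae.encode(image)   # (1, 512, 512, 3) -> (1, 4, 64, 64)
#   image2 = vae.decode(latent)  # (1, 4, 64, 64)   -> (1, 512, 512, 3)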

def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

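# Worked example: with target_batch_size=6 and batched_number=2 (e.g. cond and
# uncond sampled in one batch), a 3-image hint keeps its 3 per-batch images and
# is then tiled once per batch, yielding 6 hints aligned with the model batch.
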
class ControlNet:
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    use_fp16 = model_management.should_use_fp16()

    controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

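# Chaining sketch (hypothetical paths): controlnets form a linked list via
# set_previous_controlnet(), and get_control() recurses through it, summing the
# per-layer outputs:
#
#   cn1 = load_controlnet("models/controlnet/canny.safetensors").set_cond_hint(canny_hint)
#   cn2 = load_controlnet("models/controlnet/depth.safetensors").set_cond_hint(depth_hint, 0.7)
#   cn2.set_previous_controlnet(cn1)
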
class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip
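
# Usage sketch (hypothetical paths): one state dict is routed to the matching
# text encoder by its layer count; two files select the dual-encoder SDXL path:
#
#   clip = load_clip(["models/clip/clip_l.safetensors", "models/clip/clip_g.safetensors"])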

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    v_prediction = False

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            v_prediction = True

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, v_prediction=v_prediction)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(model_config, v_prediction=v_prediction)

    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)

def calculate_parameters(sd, prefix):
    params = 0
    for k in sd.keys():
        if k.startswith(prefix):
            params += sd[k].nelement()
    return params

def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    parameters = calculate_parameters(sd, "model.diffusion_model.")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd, "model.diffusion_model.")
    model = model.to(offload_device)
    model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae, clipvision)
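
# Usage sketch (hypothetical path): this is the config-free loader; it detects
# the model type from the state dict itself, unlike load_checkpoint() above:
#
#   model, clip, vae, _ = load_checkpoint_guess_config(
#       "models/checkpoints/example.safetensors", embedding_directory="models/embeddings")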

def load_unet(unet_path): #load unet in diffusers format
    sd = utils.load_torch_file(unet_path)
    parameters = calculate_parameters(sd, "")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    match = {}
    match["context_dim"] = sd["down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight"].shape[1]
    match["model_channels"] = sd["conv_in.weight"].shape[0]
    match["in_channels"] = sd["conv_in.weight"].shape[1]
    match["adm_in_channels"] = None
    if "class_embedding.linear_1.weight" in sd:
        match["adm_in_channels"] = sd["class_embedding.linear_1.weight"].shape[1]

    SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
            'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': fp16, 'in_channels': 4, 'model_channels': 320,
            'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4],
            'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048}

    SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
                    'num_classes': 'sequential', 'adm_in_channels': 2560, 'use_fp16': fp16, 'in_channels': 4, 'model_channels': 384,
                    'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 4, 4, 0], 'channel_mult': [1, 2, 4, 4],
                    'transformer_depth_middle': 4, 'use_linear_in_transformer': True, 'context_dim': 1280}

    SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
            'adm_in_channels': None, 'use_fp16': fp16, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2,
            'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
            'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}

    SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
                    'num_classes': 'sequential', 'adm_in_channels': 2048, 'use_fp16': True, 'in_channels': 4, 'model_channels': 320,
                    'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
                    'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}

    SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
                    'num_classes': 'sequential', 'adm_in_channels': 1536, 'use_fp16': True, 'in_channels': 4, 'model_channels': 320,
                    'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
                    'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024}

    SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
            'adm_in_channels': None, 'use_fp16': True, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2,
            'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4],
            'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768}

    supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl]
    print("match", match)
    for unet_config in supported_models:
        matches = True
        for k in match:
            if match[k] != unet_config[k]:
                matches = False
                break
        if matches:
            diffusers_keys = utils.unet_to_diffusers(unet_config)
            new_sd = {}
            for k in diffusers_keys:
                if k in sd:
                    new_sd[diffusers_keys[k]] = sd.pop(k)
                else:
                    print(diffusers_keys[k], k)
            offload_device = model_management.unet_offload_device()
            model_config = model_detection.model_config_from_unet_config(unet_config)
            model = model_config.get_model(new_sd, "")
            model = model.to(offload_device)
            model.load_model_weights(new_sd, "")
            return ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device)

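# Usage sketch (hypothetical path): load_unet() returns only a ModelPatcher for
# a diffusers-layout UNet; CLIP and VAE have to be loaded separately:
#
#   unet = load_unet("models/unet/example_unet.safetensors")
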
def save_checkpoint(output_path, model, clip, vae, metadata=None):
    try:
        model.patch_model()
        clip.patch_model()
        sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
        utils.save_torch_file(sd, output_path, metadata=metadata)
        model.unpatch_model()
        clip.unpatch_model()
    except Exception as e:
        model.unpatch_model()
        clip.unpatch_model()
        raise e