import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

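# Load a state dict into the model non-strictly, drop the tensors the model consumed
# from the input dict to free memory, and print any missing keys.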
def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

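# Load CLIP text encoder weights: remap legacy transformer key names, round float
# position_ids, convert original-checkpoint keys, then delegate to load_model_weights.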
def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

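# Mapping from CLIP sub-module names to the suffixes used in LoRA key names.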
LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}


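# Convert a loaded LoRA state dict into a patch dict keyed by target model weight names.
# Handles regular lora_up/lora_down, diffusers and transformers layouts, plus LoHa and LoKr weights.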
def load_lora(lora, to_load):
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        regular_lora = "{}.lora_up.weight".format(x)
        diffusers_lora = "{}_lora.up.weight".format(x)
        transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
        A_name = None

        if regular_lora in lora.keys():
            A_name = regular_lora
            B_name = "{}.lora_down.weight".format(x)
            mid_name = "{}.lora_mid.weight".format(x)
        elif diffusers_lora in lora.keys():
            A_name = diffusers_lora
            B_name = "{}_lora.down.weight".format(x)
            mid_name = None
        elif transformers_lora in lora.keys():
            A_name = transformers_lora
            B_name = "{}.lora_linear_layer.down.weight".format(x)
            mid_name = None

        if A_name is not None:
            mid = None
            if mid_name is not None and mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)


        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)


        ######## lokr
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

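# Build the mapping from LoRA key names to CLIP text encoder weight names (SD1/SD2/SDXL variants).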
def model_lora_keys_clip(model, key_map=None):
    if key_map is None: #use a fresh dict instead of a shared mutable default
        key_map = {}
    sdk = model.state_dict().keys()

    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    clip_l_present = False
    for b in range(32):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k
                lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                key_map[lora_key] = k

            k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                key_map[lora_key] = k
                clip_l_present = True
                lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                key_map[lora_key] = k

            k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                if clip_l_present:
                    lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                    key_map[lora_key] = k
                    lora_key = "text_encoder_2.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                    key_map[lora_key] = k
                else:
                    lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                    key_map[lora_key] = k
                    lora_key = "text_encoder.text_model.encoder.layers.{}.{}".format(b, c) #diffusers lora
                    key_map[lora_key] = k

    return key_map

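# Build the mapping from LoRA key names (including diffusers style names) to UNet weight names.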
def model_lora_keys_unet(model, key_map=None):
    if key_map is None: #use a fresh dict instead of a shared mutable default
        key_map = {}
    sdk = model.state_dict().keys()

    for k in sdk:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = k

    diffusers_keys = utils.unet_to_diffusers(model.model_config.unet_config)
    for k in diffusers_keys:
        if k.endswith(".weight"):
            unet_key = "diffusion_model.{}".format(diffusers_keys[k])
            key_lora = k[:-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = unet_key

            diffusers_lora_prefix = ["", "unet."]
            for p in diffusers_lora_prefix:
                diffusers_lora_key = "{}{}".format(p, k[:-len(".weight")].replace(".to_", ".processor.to_"))
                if diffusers_lora_key.endswith(".to_out.0"):
                    diffusers_lora_key = diffusers_lora_key[:-2]
                key_map[diffusers_lora_key] = unet_key
    return key_map

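# Replace a (possibly nested) attribute of a module with a new torch.nn.Parameter.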
def set_attr(obj, attr, value):
    attrs = attr.split(".")
    for name in attrs[:-1]:
        obj = getattr(obj, name)
    prev = getattr(obj, attrs[-1])
    setattr(obj, attrs[-1], torch.nn.Parameter(value))
    del prev

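# Wraps a model and keeps a list of weight patches (LoRA etc.) plus option patches that can be
# applied with patch_model() and reverted with unpatch_model().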
class ModelPatcher:
    def __init__(self, model, load_device, offload_device, size=0, current_device=None):
        self.size = size
        self.model = model
        self.patches = {}
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()
        self.load_device = load_device
        self.offload_device = offload_device
        if current_device is None:
            self.current_device = self.offload_device
        else:
            self.current_device = current_device

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device)
        n.patches = {}
        for k in self.patches:
            n.patches[k] = self.patches[k][:]

        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def is_clone(self, other):
        if hasattr(other, 'model') and self.model is other.model:
            return True
        return False

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_unet_function_wrapper(self, unet_wrapper_function):
        self.model_options["model_function_wrapper"] = unet_wrapper_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        if hasattr(self.model, "get_dtype"):
            return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = set()
        for k in patches:
            if k in self.model_keys:
                p.add(k)
                current_patches = self.patches.get(k, [])
                current_patches.append((strength_patch, patches[k], strength_model))
                self.patches[k] = current_patches

        return list(p)

    def get_key_patches(self, filter_prefix=None):
        model_sd = self.model_state_dict()
        p = {}
        for k in model_sd:
            if filter_prefix is not None:
                if not k.startswith(filter_prefix):
                    continue
            if k in self.patches:
                p[k] = [model_sd[k]] + self.patches[k]
            else:
                p[k] = (model_sd[k],)
        return p

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

    def patch_model(self, device_to=None):
        model_sd = self.model_state_dict()
        for key in self.patches:
            if key not in model_sd:
                print("could not patch. key doesn't exist in model:", key)
                continue

            weight = model_sd[key]

            if key not in self.backup:
                self.backup[key] = weight.to(self.offload_device)

            if device_to is not None:
                temp_weight = weight.float().to(device_to, copy=True)
            else:
                temp_weight = weight.to(torch.float32, copy=True)
            out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
            set_attr(self.model, key, out_weight)
            del temp_weight

        if device_to is not None:
            self.model.to(device_to)
            self.current_device = device_to

        return self.model

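    # Apply all patches registered for a weight: plain diffs, lora/locon, lokr and loha formats.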
    def calculate_weight(self, patches, weight, key):
        for p in patches:
            alpha = p[0]
            v = p[1]
            strength_model = p[2]

            if strength_model != 1.0:
                weight *= strength_model

            if isinstance(v, list):
                v = (self.calculate_weight(v[1:], v[0].clone(), key), )

            if len(v) == 1:
                w1 = v[0]
                if alpha != 0.0:
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
            elif len(v) == 4: #lora/locon
                mat1 = v[0].float().to(weight.device)
                mat2 = v[1].float().to(weight.device)
                if v[2] is not None:
                    alpha *= v[2] / mat2.shape[0]
                if v[3] is not None:
                    #locon mid weights, hopefully the math is fine because I didn't properly test it
                    mat3 = v[3].float().to(weight.device)
                    final_shape = [mat2.shape[1], mat2.shape[0], mat3.shape[2], mat3.shape[3]]
                    mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1), mat3.transpose(0, 1).flatten(start_dim=1)).reshape(final_shape).transpose(0, 1)
                try:
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1), mat2.flatten(start_dim=1))).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)
            elif len(v) == 8: #lokr
                w1 = v[0]
                w2 = v[1]
                w1_a = v[3]
                w1_b = v[4]
                w2_a = v[5]
                w2_b = v[6]
                t2 = v[7]
                dim = None

                if w1 is None:
                    dim = w1_b.shape[0]
                    w1 = torch.mm(w1_a.float(), w1_b.float())
                else:
                    w1 = w1.float().to(weight.device)

                if w2 is None:
                    dim = w2_b.shape[0]
                    if t2 is None:
                        w2 = torch.mm(w2_a.float().to(weight.device), w2_b.float().to(weight.device))
                    else:
                        w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2_b.float().to(weight.device), w2_a.float().to(weight.device))
                else:
                    w2 = w2.float().to(weight.device)

                if len(w2.shape) == 4:
                    w1 = w1.unsqueeze(2).unsqueeze(2)
                if v[2] is not None and dim is not None:
                    alpha *= v[2] / dim

                try:
                    weight += alpha * torch.kron(w1, w2).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)
            else: #loha
                w1a = v[0]
                w1b = v[1]
                if v[2] is not None:
                    alpha *= v[2] / w1b.shape[0]
                w2a = v[3]
                w2b = v[4]
                if v[5] is not None: #cp decomposition
                    t1 = v[5]
                    t2 = v[6]
                    m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float().to(weight.device), w1b.float().to(weight.device), w1a.float().to(weight.device))
                    m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float().to(weight.device), w2b.float().to(weight.device), w2a.float().to(weight.device))
                else:
                    m1 = torch.mm(w1a.float().to(weight.device), w1b.float().to(weight.device))
                    m2 = torch.mm(w2a.float().to(weight.device), w2b.float().to(weight.device))

                try:
                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype)
                except Exception as e:
                    print("ERROR", key, e)

        return weight

    def unpatch_model(self, device_to=None):
        keys = list(self.backup.keys())

        for k in keys:
            set_attr(self.model, k, self.backup[k])

        self.backup = {}

        if device_to is not None:
            self.model.to(device_to)
            self.current_device = device_to


def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = model_lora_keys_unet(model.model)
    key_map = model_lora_keys_clip(clip.cond_stage_model, key_map)
    loaded = load_lora(lora, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


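# Wraps the CLIP text encoder, its tokenizer and a ModelPatcher; handles tokenizing and encoding prompts.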
class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params.copy()
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        params['device'] = load_device
        self.cond_stage_model = clip(**(params))
        #TODO: make sure this doesn't have a quality loss before enabling.
        # if model_management.should_use_fp16(load_device):
        #     self.cond_stage_model.half()

        self.cond_stage_model = self.cond_stage_model.to()

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        return self.patcher.add_patches(patches, strength_patch, strength_model)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        else:
            self.cond_stage_model.reset_clip_layer()

        model_management.load_model_gpu(self.patcher)
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def patch_model(self):
        self.patcher.patch_model()

    def unpatch_model(self):
        self.patcher.unpatch_model()

    def get_key_patches(self):
        return self.patcher.get_key_patches()

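# Wraps the first stage autoencoder; encodes/decodes between pixels and latents, with tiled
# fallbacks when the full image does not fit in memory.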
class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        self.offload_device = model_management.vae_offload_device()
        self.vae_dtype = model_management.vae_dtype()
        self.first_stage_model.to(self.vae_dtype)

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            memory_used = (2562 * samples_in.shape[2] * samples_in.shape[3] * 64) * 1.4
            model_management.free_memory(memory_used, self.device)
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int(free_memory / memory_used)
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            memory_used = (2078 * pixel_samples.shape[2] * pixel_samples.shape[3]) * 1.4 #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            model_management.free_memory(memory_used, self.device)
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int(free_memory / memory_used)
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def get_sd(self):
        return self.first_stage_model.state_dict()


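# Repeat or trim an image batch so it lines up with the target latent batch size.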
def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

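# Common state and helpers shared by ControlNet and T2IAdapter (cond hint, strength, timestep range, chaining).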
class ControlBase:
    def __init__(self, device=None):
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        self.timestep_percent_range = (1.0, 0.0)
        self.timestep_range = None

        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None

    def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)):
        self.cond_hint_original = cond_hint
        self.strength = strength
        self.timestep_percent_range = timestep_percent_range
        return self

    def pre_run(self, model, percent_to_timestep_function):
        self.timestep_range = (percent_to_timestep_function(self.timestep_percent_range[0]), percent_to_timestep_function(self.timestep_percent_range[1]))
        if self.previous_controlnet is not None:
            self.previous_controlnet.pre_run(model, percent_to_timestep_function)

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None
        self.timestep_range = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

    def copy_to(self, c):
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        c.timestep_percent_range = self.timestep_percent_range

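# ControlNet conditioning: runs the control model on the hint image and adds its outputs to the UNet blocks.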
class ControlNet(ControlBase):
    def __init__(self, control_model, global_average_pooling=False, device=None):
        super().__init__(device)
        self.control_model = control_model
        self.control_model_wrapped = ModelPatcher(self.control_model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.timestep_range is not None:
            if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
                if control_prev is not None:
                    return control_prev
                else:
                    return {}

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        self.copy_to(c)
        return c

    def get_models(self):
        out = super().get_models()
        out.append(self.control_model_wrapped)
        return out


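# Load a controlnet checkpoint (diffusers or original/pth format); falls back to a T2I adapter
# if the file does not contain controlnet weights.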
def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)

    controlnet_config = None
    if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format
        use_fp16 = model_management.should_use_fp16()
        controlnet_config = model_detection.unet_config_from_diffusers_unet(controlnet_data, use_fp16)
        diffusers_keys = utils.unet_to_diffusers(controlnet_config)
        diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
        diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"

        count = 0
        loop = True
        while loop:
            suffix = [".weight", ".bias"]
            for s in suffix:
                k_in = "controlnet_down_blocks.{}{}".format(count, s)
                k_out = "zero_convs.{}.0{}".format(count, s)
                if k_in not in controlnet_data:
                    loop = False
                    break
                diffusers_keys[k_in] = k_out
            count += 1

        count = 0
        loop = True
        while loop:
            suffix = [".weight", ".bias"]
            for s in suffix:
                if count == 0:
                    k_in = "controlnet_cond_embedding.conv_in{}".format(s)
                else:
                    k_in = "controlnet_cond_embedding.blocks.{}{}".format(count - 1, s)
                k_out = "input_hint_block.{}{}".format(count * 2, s)
                if k_in not in controlnet_data:
                    k_in = "controlnet_cond_embedding.conv_out{}".format(s)
                    loop = False
                diffusers_keys[k_in] = k_out
            count += 1

        new_sd = {}
        for k in diffusers_keys:
            if k in controlnet_data:
                new_sd[diffusers_keys[k]] = controlnet_data.pop(k)

        leftover_keys = controlnet_data.keys()
        if len(leftover_keys) > 0:
            print("leftover keys:", leftover_keys)
        controlnet_data = new_sd

    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    if controlnet_config is None:
        use_fp16 = model_management.should_use_fp16()
        controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1]
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

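# T2I adapter conditioning: computes control features once from the hint image and feeds them
# to the UNet input blocks.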
class T2IAdapter(ControlBase):
    def __init__(self, t2i_model, channels_in, device=None):
        super().__init__(device)
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.control_input = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.timestep_range is not None:
            if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
                if control_prev is not None:
                    return control_prev
                else:
                    return {}

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        self.copy_to(c)
        return c

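# Detect the T2I adapter variant (light or full) from the state dict keys and load it.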
def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


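# Style adapter wrapper: turns CLIP vision output into conditioning tokens.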
class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


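# Load one or more CLIP text encoder checkpoints and pick the matching SD1/SD2/SDXL model and tokenizer.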
def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip

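# Load a GLIGEN model and wrap it in a ModelPatcher.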
def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())

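# Load a checkpoint using an explicit yaml config (legacy code path).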
def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    model_type = model_base.ModelType.EPS

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            model_type = model_base.ModelType.V_PREDICTION
    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)
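    # Build a minimal model_config by hand; this legacy path assumes the SD1.x
    # latent format.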
    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, model_type=model_type)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
    else:
        model = model_base.BaseModel(model_config, model_type=model_type)
    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
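        # Map the cond_stage target class from the yaml config to the matching
        # clip model / tokenizer pair.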
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)
def calculate_parameters(sd, prefix):
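    """Count the number of weight elements whose key starts with prefix."""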
    params = 0
    for k in sd.keys():
        if k.startswith(prefix):
            params += sd[k].nelement()
    return params
def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
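    """Load a checkpoint and detect the model type from its state dict.

    Returns a (ModelPatcher, clip, vae, clipvision) tuple; entries that were not
    requested are returned as None.
    """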
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None
    parameters = calculate_parameters(sd, "model.diffusion_model.")
    fp16 = model_management.should_use_fp16(model_params=parameters)
    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))
    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)
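    # Pick the load dtype and, when enough memory is free, load the UNet weights
    # straight onto the GPU instead of the offload device.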
    dtype = torch.float32
    if fp16:
        dtype = torch.float16

    inital_load_device = model_management.unet_inital_load_device(parameters, dtype)
    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device)
    model.load_model_weights(sd, "model.diffusion_model.")
    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)
    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)
    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)
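    # If the weights were loaded straight onto the GPU, tell the model management
    # system so the model is tracked as currently loaded.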
    model_patcher = ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
    if inital_load_device != torch.device("cpu"):
        print("loaded straight to GPU")
        model_management.load_model_gpu(model_patcher)

    return (model_patcher, clip, vae, clipvision)

def load_unet(unet_path): #load unet in diffusers format
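    """Load a standalone UNet saved in the diffusers layout.

    Parameter names are remapped to the internal naming before loading; returns
    a ModelPatcher, or None if the UNet layout is not recognized.
    """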
    sd = utils.load_torch_file(unet_path)
    parameters = calculate_parameters(sd, "")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    model_config = model_detection.model_config_from_diffusers_unet(sd, fp16)
    if model_config is None:
        print("ERROR UNSUPPORTED UNET", unet_path)
        return None

    diffusers_keys = utils.unet_to_diffusers(model_config.unet_config)

    new_sd = {}
    for k in diffusers_keys:
        if k in sd:
            new_sd[diffusers_keys[k]] = sd.pop(k)
        else:
            print(diffusers_keys[k], k)
    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(new_sd, "")
    model = model.to(offload_device)
    model.load_model_weights(new_sd, "")
    return ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device)
def save_checkpoint(output_path, model, clip, vae, metadata=None):
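    """Save the model, CLIP and VAE weights as a single checkpoint file.

    Weight patches (e.g. LoRAs) are applied before saving and removed again
    afterwards, including when saving fails.
    """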
    try:
        model.patch_model()
        clip.patch_model()
        sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
        utils.save_torch_file(sd, output_path, metadata=metadata)
        model.unpatch_model()
        clip.unpatch_model()
    except Exception as e:
        model.unpatch_model()
        clip.unpatch_model()
        raise e