import torch
import contextlib
import copy
import inspect

from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml

from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

def load_model_weights(model, sd):
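    # Load with strict=False, then free every tensor the model actually
    # consumed from sd so peak memory stays low; missing keys are reported.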
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
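    # Remap legacy "cond_stage_model.transformer.*" keys to the transformers
    # text_model layout, round float32 position_ids, and convert open_clip
    # style weights before loading.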
    k = list(sd.keys())
    for x in k:
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    sd = utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)

LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
}

transformer_lora_blocks = {
    "transformer_blocks.{}.attn1.to_q": "transformer_blocks_{}_attn1_to_q",
    "transformer_blocks.{}.attn1.to_k": "transformer_blocks_{}_attn1_to_k",
    "transformer_blocks.{}.attn1.to_v": "transformer_blocks_{}_attn1_to_v",
    "transformer_blocks.{}.attn1.to_out.0": "transformer_blocks_{}_attn1_to_out_0",
    "transformer_blocks.{}.attn2.to_q": "transformer_blocks_{}_attn2_to_q",
    "transformer_blocks.{}.attn2.to_k": "transformer_blocks_{}_attn2_to_k",
    "transformer_blocks.{}.attn2.to_v": "transformer_blocks_{}_attn2_to_v",
    "transformer_blocks.{}.attn2.to_out.0": "transformer_blocks_{}_attn2_to_out_0",
    "transformer_blocks.{}.ff.net.0.proj": "transformer_blocks_{}_ff_net_0_proj",
    "transformer_blocks.{}.ff.net.2": "transformer_blocks_{}_ff_net_2",
}

for i in range(10):
    for k in transformer_lora_blocks:
        LORA_UNET_MAP_ATTENTIONS[k.format(i)] = transformer_lora_blocks[k].format(i)

LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(lora, to_load):
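    # patch_dict values are tuples whose length encodes the patch type:
    # 4 = lora/locon, 7 = loha, 8 = lokr (dispatched on in ModelPatcher.patch_model).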
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        ######## loha
        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)

        ######## lokr
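        # lokr factors the weight delta as a Kronecker product; each factor can
        # be stored directly (w1/w2), as a low-rank pair (w*_a/w*_b), or with a
        # CP tensor (t2) for conv kernels.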
        lokr_w1_name = "{}.lokr_w1".format(x)
        lokr_w2_name = "{}.lokr_w2".format(x)
        lokr_w1_a_name = "{}.lokr_w1_a".format(x)
        lokr_w1_b_name = "{}.lokr_w1_b".format(x)
        lokr_t2_name = "{}.lokr_t2".format(x)
        lokr_w2_a_name = "{}.lokr_w2_a".format(x)
        lokr_w2_b_name = "{}.lokr_w2_b".format(x)

        lokr_w1 = None
        if lokr_w1_name in lora.keys():
            lokr_w1 = lora[lokr_w1_name]
            loaded_keys.add(lokr_w1_name)

        lokr_w2 = None
        if lokr_w2_name in lora.keys():
            lokr_w2 = lora[lokr_w2_name]
            loaded_keys.add(lokr_w2_name)

        lokr_w1_a = None
        if lokr_w1_a_name in lora.keys():
            lokr_w1_a = lora[lokr_w1_a_name]
            loaded_keys.add(lokr_w1_a_name)

        lokr_w1_b = None
        if lokr_w1_b_name in lora.keys():
            lokr_w1_b = lora[lokr_w1_b_name]
            loaded_keys.add(lokr_w1_b_name)

        lokr_w2_a = None
        if lokr_w2_a_name in lora.keys():
            lokr_w2_a = lora[lokr_w2_a_name]
            loaded_keys.add(lokr_w2_a_name)

        lokr_w2_b = None
        if lokr_w2_b_name in lora.keys():
            lokr_w2_b = lora[lokr_w2_b_name]
            loaded_keys.add(lokr_w2_b_name)

        lokr_t2 = None
        if lokr_t2_name in lora.keys():
            lokr_t2 = lora[lokr_t2_name]
            loaded_keys.add(lokr_t2_name)

        if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None):
            patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()

    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k

    counter = 3
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    clip_l_present = False
    for b in range(32):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k

            k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                key_map[lora_key] = k
                clip_l_present = True

            k = "clip_g.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                if clip_l_present:
                    lora_key = "lora_te2_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base
                else:
                    lora_key = "lora_te_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #TODO: test if this is correct for SDXL-Refiner
                key_map[lora_key] = k

    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    for k in sdk:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
            key_map["lora_unet_{}".format(key_lora)] = k

    return key_map

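# ModelPatcher wraps a model plus a queue of weight patches (lora and friends).
# patch_model() applies them in place, backing up originals so that
# unpatch_model() can restore the unmodified weights afterwards.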
class ModelPatcher:
    def __init__(self, model, load_device, offload_device, size=0):
        self.size = size
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}
        self.model_size()
        self.load_device = load_device
        self.offload_device = offload_device

    def model_size(self):
        if self.size > 0:
            return self.size
        model_sd = self.model.state_dict()
        size = 0
        for k in model_sd:
            t = model_sd[k]
            size += t.nelement() * t.element_size()
        self.size = size
        self.model_keys = set(model_sd.keys())
        return size

    def clone(self):
        n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        return n

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        if len(inspect.signature(sampler_cfg_function).parameters) == 3:
            self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
        else:
            self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_unet_function_wrapper(self, unet_wrapper_function):
        self.model_options["model_function_wrapper"] = unet_wrapper_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_patch_replace(self, patch, name, block_name, number):
        to = self.model_options["transformer_options"]
        if "patches_replace" not in to:
            to["patches_replace"] = {}
        if name not in to["patches_replace"]:
            to["patches_replace"][name] = {}
        to["patches_replace"][name][(block_name, number)] = patch

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def set_model_attn1_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn1", block_name, number)

    def set_model_attn2_replace(self, patch, block_name, number):
        self.set_model_patch_replace(patch, "attn2", block_name, number)

    def set_model_attn1_output_patch(self, patch):
        self.set_model_patch(patch, "attn1_output_patch")

    def set_model_attn2_output_patch(self, patch):
        self.set_model_patch(patch, "attn2_output_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)
        if "patches_replace" in to:
            patches = to["patches_replace"]
            for name in patches:
                patch_list = patches[name]
                for k in patch_list:
                    if hasattr(patch_list[k], "to"):
                        patch_list[k] = patch_list[k].to(device)

    def model_dtype(self):
        return self.model.get_dtype()

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        p = {}
        for k in patches:
            if k in self.model_keys:
                p[k] = patches[k]
        self.patches += [(strength_patch, p, strength_model)]
        return p.keys()

    def model_state_dict(self, filter_prefix=None):
        sd = self.model.state_dict()
        keys = list(sd.keys())
        if filter_prefix is not None:
            for k in keys:
                if not k.startswith(filter_prefix):
                    sd.pop(k)
        return sd

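    # Apply every queued patch directly to the live weights; pristine copies go
    # into self.backup first so unpatch_model can undo the merge.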
    def patch_model(self):
        model_sd = self.model_state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]
                strength_model = p[2]

                if strength_model != 1.0:
                    weight *= strength_model

                if len(v) == 1:
                    w1 = v[0]
                    if w1.shape != weight.shape:
                        print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape))
                    else:
                        weight += alpha * w1.type(weight.dtype).to(weight.device)
                elif len(v) == 4: #lora/locon
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
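                # lokr: delta = kron(w1, w2); each factor is reconstructed from
                # its low-rank (or CP) parts when not stored directly.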
                elif len(v) == 8: #lokr
                    w1 = v[0]
                    w2 = v[1]
                    w1_a = v[3]
                    w1_b = v[4]
                    w2_a = v[5]
                    w2_b = v[6]
                    t2 = v[7]
                    dim = None

                    if w1 is None:
                        dim = w1_b.shape[0]
                        w1 = torch.mm(w1_a.float(), w1_b.float())

                    if w2 is None:
                        dim = w2_b.shape[0]
                        if t2 is None:
                            w2 = torch.mm(w2_a.float(), w2_b.float())
                        else:
                            w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float())

                    if len(w2.shape) == 4:
                        w1 = w1.unsqueeze(2).unsqueeze(2)
                    if v[2] is not None and dim is not None:
                        alpha *= v[2] / dim

                    weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device)
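                # loha: delta = (w1a @ w1b) * (w2a @ w2b), with an optional CP
                # decomposition through t1/t2; alpha is rescaled by alpha/rank.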
                else: #loha
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model

    def unpatch_model(self):
        model_sd = self.model_state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

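# Builds the lora key map for both the unet and the text encoder, then returns
# patched clones so the original model/clip stay untouched.
# Rough usage sketch (hypothetical file name):
#   lora = utils.load_torch_file("example_lora.safetensors", safe_load=True)
#   model_l, clip_l = load_lora_for_models(model, clip, lora, 1.0, 1.0)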
def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        self.cond_stage_model = clip(**(params))

        #TODO: make sure this doesn't have a quality loss before enabling.
        # if model_management.should_use_fp16(load_device):
        #     self.cond_stage_model.half()

        self.cond_stage_model = self.cond_stage_model.to()

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.load_sd(sd)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)

        model_management.load_model_gpu(self.patcher)
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def patch_model(self):
        self.patcher.patch_model()

    def unpatch_model(self):
        self.patcher.unpatch_model()

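# VAE wraps the first-stage autoencoder. decode()/encode() process in batches
# sized to the available memory and fall back to the tiled variants on OOM.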
class VAE:
    def __init__(self, ckpt_path=None, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss")
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()
        if ckpt_path is not None:
            sd = utils.load_torch_file(ckpt_path)
            if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                sd = diffusers_convert.convert_vae_state_dict(sd)
            self.first_stage_model.load_state_dict(sd, strict=False)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        self.offload_device = model_management.vae_offload_device()

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.device) - 1.).sample()
        samples = utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory during regular VAE decoding; retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2078 * pixel_samples.shape[2] * pixel_samples.shape[3])) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu")
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory during regular VAE encoding; retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        self.first_stage_model = self.first_stage_model.to(self.offload_device)
        return samples

    def get_sd(self):
        return self.first_stage_model.state_dict()


def broadcast_image_to(tensor, target_batch_size, batched_number):
    current_batch_size = tensor.shape[0]
    #print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

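# ControlNet runs a cldm control model on the latent + hint image and returns
# per-block residuals under 'middle'/'output'; several controls can be chained
# through previous_controlnet.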
class ControlNet:
    def __init__(self, control_model, global_average_pooling=False, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.global_average_pooling = global_average_pooling

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            context = torch.cat(cond['c_crossattn'], 1)
            y = cond.get('c_adm', None)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            if self.global_average_pooling:
                x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])

            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True)
    pth_key = 'control_model.zero_convs.0.0.weight'
    pth = False
    key = 'zero_convs.0.0.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
        prefix = "control_model."
    elif key in controlnet_data:
        prefix = ""
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error: checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

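    # Build a cldm.ControlNet whose unet layout matches the detected model
    # config: the output layer is dropped and a 3-channel hint input is added.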
    use_fp16 = model_management.should_use_fp16()

    controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config
    controlnet_config.pop("out_channels")
    controlnet_config["hint_channels"] = 3
    control_model = cldm.ControlNet(**controlnet_config)

    if pth:
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
    else:
        missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
    print(missing, unexpected)

858
859
860
    if use_fp16:
        control_model = control_model.half()

    global_average_pooling = False
    if ckpt_path.endswith("_shuffle.pth") or ckpt_path.endswith("_shuffle.safetensors") or ckpt_path.endswith("_shuffle_fp16.safetensors"): #TODO: smarter way of enabling global_average_pooling
        global_average_pooling = True

    control = ControlNet(control_model, global_average_pooling=global_average_pooling)
    return control

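# T2IAdapter mirrors the ControlNet interface, but its features depend only on
# the hint image, so they are computed once and cached in control_input.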
class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.control_input = None
            self.cond_hint = None
            self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
        if x_noisy.shape[0] != self.cond_hint.shape[0]:
            self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
        if self.control_input is None:
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

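# Detect the adapter variant from its state dict layout: "body.0.in_conv"
# means Adapter_light, "conv_in" means the full Adapter.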
def load_t2i_adapter(t2i_data):
    keys = t2i_data.keys()
    if 'adapter' in keys:
        t2i_data = t2i_data['adapter']
        keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        channel = t2i_data['conv_in.weight'].shape[0]
        ksize = t2i_data['body.0.block2.weight'].shape[2]
        use_conv = False
        down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
        if len(down_opts) > 0:
            use_conv = True
        model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)


class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
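    # Only the T2I style adapter (identified by its "style_embedding" key) is
    # supported here.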
    model_data = utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
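    # Guess the text encoder from the layer count in the state dict: layer 30
    # present -> SDXL refiner (CLIP-G), layer 22 present -> SD2.x, otherwise
    # SD1.x; two checkpoint files -> SDXL base (dual text encoders).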
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
1029
1030
1031
1032
1033
1034
1035
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip

def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

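# Legacy loader driven by an explicit yaml config; prefer
# load_checkpoint_guess_config below, which detects everything from the weights.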
def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config["use_fp16"]

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    v_prediction = False

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            v_prediction = True

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = EmptyClass()
    model_config.unet_config = unet_config
    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model = model_base.SDInpaint(model_config, v_prediction=v_prediction)
    elif config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], v_prediction=v_prediction)
    else:
        model = model_base.BaseModel(model_config, v_prediction=v_prediction)

    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        w = WeightsLoader()
        vae = VAE(config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, state_dict)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_clip_weights(w, state_dict)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
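    # Model type, fp16, clip and vae layout are all detected from the state
    # dict itself; no yaml config is required.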
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    clip_target = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd)
    model = model.to(offload_device)
    model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    return (ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae, clipvision)

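# Bakes the currently queued patches (e.g. loras) into the saved state dict by
# patching before serialization and always unpatching afterwards, even on error.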
def save_checkpoint(output_path, model, clip, vae, metadata=None):
    try:
        model.patch_model()
        clip.patch_model()
        sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
        utils.save_torch_file(sd, output_path, metadata=metadata)
        model.unpatch_model()
        clip.unpatch_model()
    except Exception as e:
        model.unpatch_model()
        clip.unpatch_model()
        raise e