import torch
import contextlib
import copy
from tqdm.auto import tqdm

import sd1_clip
import sd2_clip
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

from . import utils
from . import clip_vision
from . import gligen

def load_model_weights(model, sd, verbose=False, load_state_dict_to=[]):
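    # Load a checkpoint state dict into the diffusion model and, via load_state_dict_to,
    # into any extra modules (VAE/CLIP wrappers). Legacy "cond_stage_model.model.*" CLIP
    # keys are remapped to the transformers text_model layout before those extra loads.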
    m, u = model.load_state_dict(sd, strict=False)

    k = list(sd.keys())
    for x in k:
        # print(x)
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()
    keys_to_replace = {
        "cond_stage_model.model.positional_embedding": "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight",
        "cond_stage_model.model.token_embedding.weight": "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight",
        "cond_stage_model.model.ln_final.weight": "cond_stage_model.transformer.text_model.final_layer_norm.weight",
        "cond_stage_model.model.ln_final.bias": "cond_stage_model.transformer.text_model.final_layer_norm.bias",
    }

    for x in keys_to_replace:
        if x in sd:
            sd[keys_to_replace[x]] = sd.pop(x)

    sd = utils.transformers_convert(sd, "cond_stage_model.model", "cond_stage_model.transformer.text_model", 24)
    for x in load_state_dict_to:
        x.load_state_dict(sd, strict=False)

    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.eval()
    return model

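# Map CLIP text encoder sub-module names to the flattened suffixes used in LoRA
# checkpoint keys (lora_te_text_model_encoder_layers_{layer}_{suffix}).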
LORA_CLIP_MAP = {
    "mlp.fc1": "mlp_fc1",
    "mlp.fc2": "mlp_fc2",
    "self_attn.k_proj": "self_attn_k_proj",
    "self_attn.q_proj": "self_attn_q_proj",
    "self_attn.v_proj": "self_attn_v_proj",
    "self_attn.out_proj": "self_attn_out_proj",
}

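# Map attention/transformer sub-module names inside a UNet block to their LoRA key suffixes.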
LORA_UNET_MAP_ATTENTIONS = {
    "proj_in": "proj_in",
    "proj_out": "proj_out",
    "transformer_blocks.0.attn1.to_q": "transformer_blocks_0_attn1_to_q",
    "transformer_blocks.0.attn1.to_k": "transformer_blocks_0_attn1_to_k",
    "transformer_blocks.0.attn1.to_v": "transformer_blocks_0_attn1_to_v",
    "transformer_blocks.0.attn1.to_out.0": "transformer_blocks_0_attn1_to_out_0",
    "transformer_blocks.0.attn2.to_q": "transformer_blocks_0_attn2_to_q",
    "transformer_blocks.0.attn2.to_k": "transformer_blocks_0_attn2_to_k",
    "transformer_blocks.0.attn2.to_v": "transformer_blocks_0_attn2_to_v",
    "transformer_blocks.0.attn2.to_out.0": "transformer_blocks_0_attn2_to_out_0",
    "transformer_blocks.0.ff.net.0.proj": "transformer_blocks_0_ff_net_0_proj",
    "transformer_blocks.0.ff.net.2": "transformer_blocks_0_ff_net_2",
}

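# Resnet sub-module names inside UNet blocks; the placeholder is filled with the resnet index.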
LORA_UNET_MAP_RESNET = {
    "in_layers.2": "resnets_{}_conv1",
    "emb_layers.1": "resnets_{}_time_emb_proj",
    "out_layers.3": "resnets_{}_conv2",
    "skip_connection": "resnets_{}_conv_shortcut"
}

def load_lora(path, to_load):
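    # Read a LoRA file and build a dict of weight patches keyed by the model parameter
    # they target (to_load maps LoRA key prefixes to model state dict keys). Plain LoRA
    # (lora_up/lora_down, optional lora_mid) and LoHa (hada_*) entries are supported;
    # any leftover keys in the file are reported as not loaded.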
    lora = utils.load_torch_file(path)
    patch_dict = {}
    loaded_keys = set()
    for x in to_load:
        alpha_name = "{}.alpha".format(x)
        alpha = None
        if alpha_name in lora.keys():
            alpha = lora[alpha_name].item()
            loaded_keys.add(alpha_name)

        A_name = "{}.lora_up.weight".format(x)
        B_name = "{}.lora_down.weight".format(x)
        mid_name = "{}.lora_mid.weight".format(x)

        if A_name in lora.keys():
            mid = None
            if mid_name in lora.keys():
                mid = lora[mid_name]
                loaded_keys.add(mid_name)
            patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid)
            loaded_keys.add(A_name)
            loaded_keys.add(B_name)

        hada_w1_a_name = "{}.hada_w1_a".format(x)
        hada_w1_b_name = "{}.hada_w1_b".format(x)
        hada_w2_a_name = "{}.hada_w2_a".format(x)
        hada_w2_b_name = "{}.hada_w2_b".format(x)
        hada_t1_name = "{}.hada_t1".format(x)
        hada_t2_name = "{}.hada_t2".format(x)
        if hada_w1_a_name in lora.keys():
            hada_t1 = None
            hada_t2 = None
            if hada_t1_name in lora.keys():
                hada_t1 = lora[hada_t1_name]
                hada_t2 = lora[hada_t2_name]
                loaded_keys.add(hada_t1_name)
                loaded_keys.add(hada_t2_name)

            patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)
            loaded_keys.add(hada_w1_a_name)
            loaded_keys.add(hada_w1_b_name)
            loaded_keys.add(hada_w2_a_name)
            loaded_keys.add(hada_w2_b_name)

    for x in lora.keys():
        if x not in loaded_keys:
            print("lora key not loaded", x)
    return patch_dict

def model_lora_keys(model, key_map=None):
    if key_map is None:
        key_map = {}
    sdk = model.state_dict().keys()
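    # Build a mapping from LoRA key names to this model's state dict keys, covering the
    # UNet attention blocks, the LoCon resnet/conv layers and the CLIP text encoder.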

    counter = 0
    for b in range(12):
        tk = "model.diffusion_model.input_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_attentions_{}_{}".format(counter // 2, counter % 2, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    for c in LORA_UNET_MAP_ATTENTIONS:
        k = "model.diffusion_model.middle_block.1.{}.weight".format(c)
        if k in sdk:
            lora_key = "lora_unet_mid_block_attentions_0_{}".format(LORA_UNET_MAP_ATTENTIONS[c])
            key_map[lora_key] = k
    counter = 3
    for b in range(12):
        tk = "model.diffusion_model.output_blocks.{}.1".format(b)
        up_counter = 0
        for c in LORA_UNET_MAP_ATTENTIONS:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_attentions_{}_{}".format(counter // 3, counter % 3, LORA_UNET_MAP_ATTENTIONS[c])
                key_map[lora_key] = k
                up_counter += 1
        if up_counter >= 4:
            counter += 1
    counter = 0
    text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}"
    for b in range(24):
        for c in LORA_CLIP_MAP:
            k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
            if k in sdk:
                lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                key_map[lora_key] = k
    #Locon stuff
    ds_counter = 0
    counter = 0
    for b in range(12):
        tk = "model.diffusion_model.input_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_{}".format(counter // 2, LORA_UNET_MAP_RESNET[c].format(counter % 2))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.op.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_down_blocks_{}_downsamplers_0_conv".format(ds_counter)
                key_map[lora_key] = k
                ds_counter += 1
        if key_in:
            counter += 1

    counter = 0
    for b in range(3):
        tk = "model.diffusion_model.middle_block.{}".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_mid_block_{}".format(LORA_UNET_MAP_RESNET[c].format(counter))
                key_map[lora_key] = k
                key_in = True
        if key_in:
            counter += 1

    counter = 0
    us_counter = 0
    for b in range(12):
        tk = "model.diffusion_model.output_blocks.{}.0".format(b)
        key_in = False
        for c in LORA_UNET_MAP_RESNET:
            k = "{}.{}.weight".format(tk, c)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_{}".format(counter // 3, LORA_UNET_MAP_RESNET[c].format(counter % 3))
                key_map[lora_key] = k
                key_in = True
        for bb in range(3):
            k = "{}.{}.conv.weight".format(tk[:-2], bb)
            if k in sdk:
                lora_key = "lora_unet_up_blocks_{}_upsamplers_0_conv".format(us_counter)
                key_map[lora_key] = k
                us_counter += 1
        if key_in:
            counter += 1

    return key_map

class ModelPatcher:
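    # Wraps a model and accumulates weight patches (e.g. from LoRAs) plus per-sampler
    # transformer options. patch_model() applies the patches in place (backing up the
    # original weights) and unpatch_model() restores them.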
    def __init__(self, model):
        self.model = model
        self.patches = []
        self.backup = {}
        self.model_options = {"transformer_options":{}}

    def clone(self):
        n = ModelPatcher(self.model)
        n.patches = self.patches[:]
        n.model_options = copy.deepcopy(self.model_options)
        return n

    def set_model_tomesd(self, ratio):
        self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}

    def set_model_sampler_cfg_function(self, sampler_cfg_function):
        self.model_options["sampler_cfg_function"] = sampler_cfg_function

    def set_model_patch(self, patch, name):
        to = self.model_options["transformer_options"]
        if "patches" not in to:
            to["patches"] = {}
        to["patches"][name] = to["patches"].get(name, []) + [patch]

    def set_model_attn1_patch(self, patch):
        self.set_model_patch(patch, "attn1_patch")

    def set_model_attn2_patch(self, patch):
        self.set_model_patch(patch, "attn2_patch")

    def model_patches_to(self, device):
        to = self.model_options["transformer_options"]
        if "patches" in to:
            patches = to["patches"]
            for name in patches:
                patch_list = patches[name]
                for i in range(len(patch_list)):
                    if hasattr(patch_list[i], "to"):
                        patch_list[i] = patch_list[i].to(device)

    def model_dtype(self):
        return self.model.diffusion_model.dtype

    def add_patches(self, patches, strength=1.0):
        p = {}
        model_sd = self.model.state_dict()
        for k in patches:
            if k in model_sd:
                p[k] = patches[k]
        self.patches += [(strength, p)]
        return p.keys()

    def patch_model(self):
        model_sd = self.model.state_dict()
        for p in self.patches:
            for k in p[1]:
                v = p[1][k]
                key = k
                if key not in model_sd:
                    print("could not patch. key doesn't exist in model:", k)
                    continue

                weight = model_sd[key]
                if key not in self.backup:
                    self.backup[key] = weight.clone()

                alpha = p[0]

                if len(v) == 4: #lora/locon
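                    # Plain LoRA / LoCon: weight += alpha * (up @ down), with an optional
                    # "mid" tensor folded into the down matrix for conv layers.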
                    mat1 = v[0]
                    mat2 = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / mat2.shape[0]
                    if v[3] is not None:
                        #locon mid weights, hopefully the math is fine because I didn't properly test it
                        final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]]
                        mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1)
                    weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device)
                else: #loha
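                    # LoHa: weight += alpha * (w1a @ w1b) * (w2a @ w2b) (element-wise product),
                    # with an optional CP decomposition (t1/t2) handled via einsum.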
                    w1a = v[0]
                    w1b = v[1]
                    if v[2] is not None:
                        alpha *= v[2] / w1b.shape[0]
                    w2a = v[3]
                    w2b = v[4]
                    if v[5] is not None: #cp decomposition
                        t1 = v[5]
                        t2 = v[6]
                        m1 = torch.einsum('i j k l, j r, i p -> p r k l', t1.float(), w1b.float(), w1a.float())
                        m2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2b.float(), w2a.float())
                    else:
                        m1 = torch.mm(w1a.float(), w1b.float())
                        m2 = torch.mm(w2a.float(), w2b.float())

                    weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype).to(weight.device)
        return self.model

    def unpatch_model(self):
        model_sd = self.model.state_dict()
        keys = list(self.backup.keys())
        for k in keys:
            model_sd[k][:] = self.backup[k]
            del self.backup[k]

        self.backup = {}

def load_lora_for_models(model, clip, lora_path, strength_model, strength_clip):
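    # Apply one LoRA file to both the UNet (ModelPatcher) and the CLIP wrapper, returning
    # patched clones; LoRA keys that end up in neither are printed as NOT LOADED.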
    key_map = model_lora_keys(model.model)
    key_map = model_lora_keys(clip.cond_stage_model, key_map)
    loaded = load_lora(lora_path, key_map)
    new_modelpatcher = model.clone()
    k = new_modelpatcher.add_patches(loaded, strength_model)
    new_clip = clip.clone()
    k1 = new_clip.add_patches(loaded, strength_clip)
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)


class CLIP:
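    # Bundles the CLIP text encoder, its tokenizer and a ModelPatcher so that LoRA patches
    # and a "clip skip" layer index can be applied when encoding prompts.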
    def __init__(self, config={}, embedding_directory=None, no_init=False):
        if no_init:
            return
        self.target_clip = config["target"]
        if "params" in config:
            params = config["params"]
        else:
            params = {}

        if self.target_clip == "ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder":
            clip = sd2_clip.SD2ClipModel
            tokenizer = sd2_clip.SD2Tokenizer
        elif self.target_clip == "ldm.modules.encoders.modules.FrozenCLIPEmbedder":
            clip = sd1_clip.SD1ClipModel
            tokenizer = sd1_clip.SD1Tokenizer

        self.cond_stage_model = clip(**(params))
        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = ModelPatcher(self.cond_stage_model)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.target_clip = self.target_clip
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def load_from_state_dict(self, sd):
        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)

    def add_patches(self, patches, strength=1.0):
        return self.patcher.add_patches(patches, strength)
    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx
    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)
    def encode_from_tokens(self, tokens, return_pooled=False):
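    # Encode pre-tokenized input with the temporarily patched text encoder. With
    # return_pooled, also return the hidden state at the position of the largest token id
    # (CLIP's end-of-text token).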
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        try:
            self.patcher.patch_model()
            cond = self.cond_stage_model.encode_token_weights(tokens)
            self.patcher.unpatch_model()
        except Exception as e:
            self.patcher.unpatch_model()
            raise e
        if return_pooled:
            eos_token_index = max(range(len(tokens[0])), key=tokens[0].__getitem__)
            pooled = cond[:, eos_token_index]
            return cond, pooled
        return cond

    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

class VAE:
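    # Wraps the first stage AutoencoderKL. decode()/encode() convert between latents and
    # [0, 1] channels-last images; decode() falls back to tiled decoding if it runs out of
    # memory, and encode_tiled()/decode_tiled() are explicit tiled variants.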
    def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
        if config is None:
            #default SD1.x/SD2.x VAE parameters
            ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
            self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss", ckpt_path=ckpt_path)
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']), ckpt_path=ckpt_path)
        self.first_stage_model = self.first_stage_model.eval()
        self.scale_factor = scale_factor
        if device is None:
            device = model_management.get_torch_device()
        self.device = device

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
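        # Decode in overlapping tiles at three different tile aspect ratios and average the
        # results to hide seams; the iteration counts below just size the progress bar.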
        it_1 = -(samples.shape[2] // -(tile_y * 2 - overlap)) * -(samples.shape[3] // -(tile_x // 2 - overlap))
        it_2 = -(samples.shape[2] // -(tile_y // 2 - overlap)) * -(samples.shape[3] // -(tile_x * 2 - overlap))
        it_3 = -(samples.shape[2] // -(tile_y - overlap)) * -(samples.shape[3] // -(tile_x - overlap))
        pbar = tqdm(total=samples.shape[0] * (it_1 + it_2 + it_3))

        decode_fn = lambda a: (self.first_stage_model.decode(1. / self.scale_factor * a.to(self.device)) + 1.0)
        output = torch.clamp((
            (utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) +
            utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) +
             utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar))
            / 3.0) / 2.0, min=0.0, max=1.0)
        return output

    def decode(self, samples_in):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        try:
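            # Rough estimate of how many latents fit in free memory per decode batch.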
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.device)
                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        self.first_stage_model = self.first_stage_model.cpu()
        pixel_samples = pixel_samples.cpu().movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        self.first_stage_model = self.first_stage_model.cpu()
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1).to(self.device)
        samples = self.first_stage_model.encode(2. * pixel_samples - 1.).sample() * self.scale_factor
        self.first_stage_model = self.first_stage_model.cpu()
        samples = samples.cpu()
        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.unload_model()
        self.first_stage_model = self.first_stage_model.to(self.device)
        pixel_samples = pixel_samples.movedim(-1,1).to(self.device)

        it_1 = -(pixel_samples.shape[2] // -(tile_y * 2 - overlap)) * -(pixel_samples.shape[3] // -(tile_x // 2 - overlap))
        it_2 = -(pixel_samples.shape[2] // -(tile_y // 2 - overlap)) * -(pixel_samples.shape[3] // -(tile_x * 2 - overlap))
        it_3 = -(pixel_samples.shape[2] // -(tile_y - overlap)) * -(pixel_samples.shape[3] // -(tile_x - overlap))
        pbar = tqdm(total=(it_1 + it_2 + it_3))

        samples = utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples += utils.tiled_scale(pixel_samples, lambda a: self.first_stage_model.encode(2. * a - 1.).sample() * self.scale_factor, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
        samples /= 3.0
        self.first_stage_model = self.first_stage_model.cpu()
        samples = samples.cpu()
        return samples

def resize_image_to(tensor, target_latent_tensor, batched_number):
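    # Upscale a conditioning image to the pixel size implied by the latent (H/W * 8) and
    # repeat/trim it along the batch dimension so it lines up with the latent batch.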
    tensor = utils.common_upscale(tensor, target_latent_tensor.shape[3] * 8, target_latent_tensor.shape[2] * 8, 'nearest-exact', "center")
    target_batch_size = target_latent_tensor.shape[0]

    current_batch_size = tensor.shape[0]
    print(current_batch_size, target_batch_size)
    if current_batch_size == 1:
        return tensor

    per_batch = target_batch_size // batched_number
    tensor = tensor[:per_batch]

    if per_batch > tensor.shape[0]:
        tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)

    current_batch_size = tensor.shape[0]
    if current_batch_size == target_batch_size:
        return tensor
    else:
        return torch.cat([tensor] * batched_number, dim=0)

class ControlNet:
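    # Wraps a cldm ControlNet. get_control() runs the control model on the hint image and
    # returns per-layer residuals ('output'/'middle') for the UNet; multiple controlnets
    # can be chained through previous_controlnet.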
    def __init__(self, control_model, device=None):
        self.control_model = control_model
        self.cond_hint_original = None
        self.cond_hint = None
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        output_dtype = x_noisy.dtype
        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = resize_image_to(self.cond_hint_original, x_noisy, batched_number).to(self.control_model.dtype).to(self.device)

        if self.control_model.dtype == torch.float16:
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext

        with precision_scope(model_management.get_autocast_device(self.device)):
            self.control_model = model_management.load_if_low_vram(self.control_model)
            control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt)
            self.control_model = model_management.unload_if_low_vram(self.control_model)
        out = {'middle':[], 'output': []}
        autocast_enabled = torch.is_autocast_enabled()

        for i in range(len(control)):
            if i == (len(control) - 1):
                key = 'middle'
                index = 0
            else:
                key = 'output'
                index = i
            x = control[i]
            x *= self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)
            if control_prev is not None and key in control_prev:
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].append(x)
        if control_prev is not None and 'input' in control_prev:
            out['input'] = control_prev['input']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def copy(self):
        c = ControlNet(self.control_model)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        out.append(self.control_model)
        return out

def load_controlnet(ckpt_path, model=None):
    controlnet_data = utils.load_torch_file(ckpt_path)
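    # Work out what kind of checkpoint this is: a full ControlNet with "control_model."
    # prefixed keys (.pth style), one without the prefix, or a T2I adapter.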
    pth_key = 'control_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    pth = False
    sd2 = False
    key = 'input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
    if pth_key in controlnet_data:
        pth = True
        key = pth_key
    elif key in controlnet_data:
        pass
    else:
        net = load_t2i_adapter(controlnet_data)
        if net is None:
            print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
        return net

    context_dim = controlnet_data[key].shape[1]

    use_fp16 = False
    if model_management.should_use_fp16() and controlnet_data[key].dtype == torch.float16:
        use_fp16 = True

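    # context_dim 768 corresponds to an SD1.x text encoder, 1024 to SD2.x; build the
    # matching cldm.ControlNet configuration.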
    if context_dim == 768:
        #SD1.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_heads=8,
                                        use_spatial_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=True,
                                        legacy=False,
                                        use_fp16=use_fp16)
    else:
        #SD2.x
        control_model = cldm.ControlNet(image_size=32,
                                        in_channels=4,
                                        hint_channels=3,
                                        model_channels=320,
                                        attention_resolutions=[ 4, 2, 1 ],
                                        num_res_blocks=2,
                                        channel_mult=[ 1, 2, 4, 4 ],
                                        num_head_channels=64,
                                        use_spatial_transformer=True,
                                        use_linear_in_transformer=True,
                                        transformer_depth=1,
                                        context_dim=context_dim,
                                        use_checkpoint=True,
                                        legacy=False,
                                        use_fp16=use_fp16)
    if pth:
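        # "difference" checkpoints store the controlnet as a diff against a base model,
        # so the matching diffusion model weights are added back in before loading.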
        if 'difference' in controlnet_data:
            if model is not None:
                m = model.patch_model()
                model_sd = m.state_dict()
                for x in controlnet_data:
                    c_m = "control_model."
                    if x.startswith(c_m):
                        sd_key = "model.diffusion_model.{}".format(x[len(c_m):])
                        if sd_key in model_sd:
                            cd = controlnet_data[x]
                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
                model.unpatch_model()
            else:
                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")

        class WeightsLoader(torch.nn.Module):
            pass
        w = WeightsLoader()
        w.control_model = control_model
        w.load_state_dict(controlnet_data, strict=False)
    else:
        control_model.load_state_dict(controlnet_data, strict=False)

    if use_fp16:
        control_model = control_model.half()

    control = ControlNet(control_model)
    return control

class T2IAdapter:
    def __init__(self, t2i_model, channels_in, device=None):
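        # Unlike a ControlNet, a T2I adapter only looks at the hint image (no timestep or
        # text conditioning), so its features are computed once and cached in control_input.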
        self.t2i_model = t2i_model
        self.channels_in = channels_in
        self.strength = 1.0
        if device is None:
            device = model_management.get_torch_device()
        self.device = device
        self.previous_controlnet = None
        self.control_input = None
        self.cond_hint_original = None
        self.cond_hint = None

    def get_control(self, x_noisy, t, cond_txt, batched_number):
        control_prev = None
        if self.previous_controlnet is not None:
            control_prev = self.previous_controlnet.get_control(x_noisy, t, cond_txt, batched_number)

        if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
            if self.cond_hint is not None:
                del self.cond_hint
            self.cond_hint = None
            self.cond_hint = resize_image_to(self.cond_hint_original, x_noisy, batched_number).float().to(self.device)
            if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
                self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
            self.t2i_model.to(self.device)
            self.control_input = self.t2i_model(self.cond_hint)
            self.t2i_model.cpu()

        output_dtype = x_noisy.dtype
        out = {'input':[]}

        autocast_enabled = torch.is_autocast_enabled()
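        # The adapter yields one feature map per resolution; pad with None so each one
        # lands on the right UNet input-block slot when merged with previous controls.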
        for i in range(len(self.control_input)):
            key = 'input'
            x = self.control_input[i] * self.strength
            if x.dtype != output_dtype and not autocast_enabled:
                x = x.to(output_dtype)

            if control_prev is not None and key in control_prev:
                index = len(control_prev[key]) - i * 3 - 3
                prev = control_prev[key][index]
                if prev is not None:
                    x += prev
            out[key].insert(0, None)
            out[key].insert(0, None)
            out[key].insert(0, x)

        if control_prev is not None and 'input' in control_prev:
            for i in range(len(out['input'])):
                if out['input'][i] is None:
                    out['input'][i] = control_prev['input'][i]
        if control_prev is not None and 'middle' in control_prev:
            out['middle'] = control_prev['middle']
        if control_prev is not None and 'output' in control_prev:
            out['output'] = control_prev['output']
        return out

    def set_cond_hint(self, cond_hint, strength=1.0):
        self.cond_hint_original = cond_hint
        self.strength = strength
        return self

    def set_previous_controlnet(self, controlnet):
        self.previous_controlnet = controlnet
        return self

    def copy(self):
        c = T2IAdapter(self.t2i_model, self.channels_in)
        c.cond_hint_original = self.cond_hint_original
        c.strength = self.strength
        return c

    def cleanup(self):
        if self.previous_controlnet is not None:
            self.previous_controlnet.cleanup()
        if self.cond_hint is not None:
            del self.cond_hint
            self.cond_hint = None

    def get_models(self):
        out = []
        if self.previous_controlnet is not None:
            out += self.previous_controlnet.get_models()
        return out

def load_t2i_adapter(t2i_data):
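    # Distinguish "light" adapters (body.*.in_conv keys) from full adapters (conv_in) and
    # infer the input channel count from the first conv weight.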
    keys = t2i_data.keys()
    if "body.0.in_conv.weight" in keys:
        cin = t2i_data['body.0.in_conv.weight'].shape[1]
        model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
    elif 'conv_in.weight' in keys:
        cin = t2i_data['conv_in.weight'].shape[1]
        model_ad = adapter.Adapter(cin=cin, channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False)
    else:
        return None
    model_ad.load_state_dict(t2i_data)
    return T2IAdapter(model_ad, cin // 64)

class StyleModel:
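    # Thin wrapper around a style adapter that maps CLIP vision hidden states to extra
    # conditioning tokens.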
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = utils.load_torch_file(ckpt_path)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_path, embedding_directory=None):
    clip_data = utils.load_torch_file(ckpt_path)
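    # A 24 layer text encoder (layer 22 present) means an SD2.x OpenCLIP model, otherwise
    # assume the SD1.x CLIP text encoder.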
    config = {}
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
    else:
        config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
    clip = CLIP(config=config, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip
def load_gligen(ckpt_path):
    data = utils.load_torch_file(ckpt_path)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return model

def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
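    # Load a checkpoint using an explicit YAML config file (original Stable Diffusion style).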
    with open(config_path, 'r') as stream:
        config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            if "use_fp16" in model_config_params["unet_config"]["params"]:
                fp16 = model_config_params["unet_config"]["params"]["use_fp16"]

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE(scale_factor=scale_factor, config=vae_config)
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

    model = instantiate_from_config(config["model"])
    sd = utils.load_torch_file(ckpt_path)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae)


def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None):
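    # Build a LatentDiffusion config directly from the checkpoint's state dict (context
    # dim, channel counts, unCLIP / inpainting variants) instead of requiring a YAML file.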
    sd = utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None

    fp16 = model_management.should_use_fp16()

    class WeightsLoader(torch.nn.Module):
        pass

    w = WeightsLoader()
    load_state_dict_to = []
    if output_vae:
        vae = VAE()
        w.first_stage_model = vae.first_stage_model
        load_state_dict_to = [w]

    if output_clip:
        clip_config = {}
        if "cond_stage_model.model.transformer.resblocks.22.attn.out_proj.weight" in sd_keys:
            clip_config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
        else:
            clip_config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
        clip = CLIP(config=clip_config, embedding_directory=embedding_directory)
        w.cond_stage_model = clip.cond_stage_model
        load_state_dict_to = [w]

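    # unCLIP checkpoints embed a CLIP vision tower and a noise augmentor; detect them from
    # their keys and rebuild the noise augmentation config.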
    clipvision_key = "embedder.model.visual.transformer.resblocks.0.attn.in_proj_weight"
    noise_aug_config = None
    if clipvision_key in sd_keys:
        size = sd[clipvision_key].shape[1]

        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd)

        noise_aug_key = "noise_augmentor.betas"
        if noise_aug_key in sd_keys:
            noise_aug_config = {}
            params = {}
            noise_schedule_config = {}
            noise_schedule_config["timesteps"] = sd[noise_aug_key].shape[0]
            noise_schedule_config["beta_schedule"] = "squaredcos_cap_v2"
            params["noise_schedule_config"] = noise_schedule_config
            noise_aug_config['target'] = "ldm.modules.encoders.noise_aug_modules.CLIPEmbeddingNoiseAugmentation"
            if size == 1280: #h
                params["timestep_dim"] = 1024
            elif size == 1024: #l
                params["timestep_dim"] = 768
            noise_aug_config['params'] = params

    sd_config = {
        "linear_start": 0.00085,
        "linear_end": 0.012,
        "num_timesteps_cond": 1,
        "log_every_t": 200,
        "timesteps": 1000,
        "first_stage_key": "jpg",
        "cond_stage_key": "txt",
        "image_size": 64,
        "channels": 4,
        "cond_stage_trainable": False,
        "monitor": "val/loss_simple_ema",
        "scale_factor": 0.18215,
        "use_ema": False,
    }

    unet_config = {
        "use_checkpoint": True,
        "image_size": 32,
        "out_channels": 4,
        "attention_resolutions": [
            4,
            2,
            1
        ],
        "num_res_blocks": 2,
        "channel_mult": [
            1,
            2,
            4,
            4
        ],
        "use_spatial_transformer": True,
        "transformer_depth": 1,
        "legacy": False
    }

    if len(sd['model.diffusion_model.input_blocks.1.1.proj_in.weight'].shape) == 2:
        unet_config['use_linear_in_transformer'] = True

    unet_config["use_fp16"] = fp16
    unet_config["model_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[0]
    unet_config["in_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
    unet_config["context_dim"] = sd['model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'].shape[1]

    sd_config["unet_config"] = {"target": "ldm.modules.diffusionmodules.openaimodel.UNetModel", "params": unet_config}
    model_config = {"target": "ldm.models.diffusion.ddpm.LatentDiffusion", "params": sd_config}

    if noise_aug_config is not None: #SD2.x unclip model
        sd_config["noise_aug_config"] = noise_aug_config
        sd_config["image_size"] = 96
        sd_config["embedding_dropout"] = 0.25
        sd_config["conditioning_key"] = 'crossattn-adm'
        model_config["target"] = "ldm.models.diffusion.ddpm.ImageEmbeddingConditionedLatentDiffusion"
    elif unet_config["in_channels"] > 4: #inpainting model
        sd_config["conditioning_key"] = "hybrid"
        sd_config["finetune_keys"] = None
        model_config["target"] = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
    else:
        sd_config["conditioning_key"] = "crossattn"

    if unet_config["context_dim"] == 1024:
        unet_config["num_head_channels"] = 64 #SD2.x
    else:
        unet_config["num_heads"] = 8 #SD1.x

    unclip = 'model.diffusion_model.label_emb.0.0.weight'
    if unclip in sd_keys:
        unet_config["num_classes"] = "sequential"
        unet_config["adm_in_channels"] = sd[unclip].shape[1]

    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
        out = sd[k]
        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
            sd_config["parameterization"] = 'v'

    model = instantiate_from_config(model_config)
    model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

    if fp16:
        model = model.half()

    return (ModelPatcher(model), clip, vae, clipvision)