"official/projects/panoptic/train.py" did not exist on "7bb4d44269324bd7860aa670a04e6face9bdb876"
sd.py 24.4 KB
Newer Older
comfyanonymous's avatar
comfyanonymous committed
1
2
import torch

from comfy import model_management
from .ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine
from .ldm.cascade.stage_a import StageA

import yaml

import comfy.utils

from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection

from . import sd1_clip
from . import sd2_clip
from . import sdxl_clip

import comfy.model_patcher
import comfy.lora
import comfy.t2i_adapter.adapter
import comfy.supported_models_base
import comfy.taesd.taesd

def load_model_weights(model, sd):
    m, u = model.load_state_dict(sd, strict=False)
    m = set(m)
    unexpected_keys = set(u)

    k = list(sd.keys())
    for x in k:
        if x not in unexpected_keys:
            # Drop weights that were consumed by the model so the source state dict
            # does not keep a duplicate copy of them in memory.
            w = sd.pop(x)
            del w
    if len(m) > 0:
        print("missing", m)
    return model

def load_clip_weights(model, sd):
    k = list(sd.keys())
    for x in k:
        # Some checkpoints are missing the "text_model." scope in the transformer keys; remap them.
        if x.startswith("cond_stage_model.transformer.") and not x.startswith("cond_stage_model.transformer.text_model."):
            y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
            sd[y] = sd.pop(x)

    # Some checkpoints store position_ids as float32; round them so they can be used as indices.
    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
        if ids.dtype == torch.float32:
            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()

    # Convert SD2.x style keys ("cond_stage_model.model.") to the transformers text_model layout.
    sd = comfy.utils.transformers_convert(sd, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24)
    return load_model_weights(model, sd)
def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
    key_map = {}
    if model is not None:
        key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
    if clip is not None:
        key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map)

    loaded = comfy.lora.load_lora(lora, key_map)
    if model is not None:
        new_modelpatcher = model.clone()
        k = new_modelpatcher.add_patches(loaded, strength_model)
    else:
        k = ()
        new_modelpatcher = None

    if clip is not None:
        new_clip = clip.clone()
        k1 = new_clip.add_patches(loaded, strength_clip)
    else:
        k1 = ()
        new_clip = None

    # Report LoRA keys that could not be matched to either the diffusion model or the text encoder.
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED", x)

    return (new_modelpatcher, new_clip)
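
# Illustrative usage sketch (not part of the upstream module; the file name is hypothetical).
# Given a ModelPatcher and CLIP returned by one of the checkpoint loaders below, a LoRA state
# dict can be patched into both the diffusion model and the text encoder:
#
#   lora_sd = comfy.utils.load_torch_file("example_lora.safetensors", safe_load=True)
#   model_patcher, clip = load_lora_for_models(model_patcher, clip, lora_sd, strength_model=1.0, strength_clip=1.0)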


class CLIP:
    def __init__(self, target=None, embedding_directory=None, no_init=False):
        if no_init:
            return
        params = target.params.copy()
        clip = target.clip
        tokenizer = target.tokenizer

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()
        params['device'] = offload_device
        params['dtype'] = model_management.text_encoder_dtype(load_device)

        self.cond_stage_model = clip(**(params))

        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
        self.patcher = comfy.model_patcher.ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
        self.layer_idx = None

    def clone(self):
        n = CLIP(no_init=True)
        n.patcher = self.patcher.clone()
        n.cond_stage_model = self.cond_stage_model
        n.tokenizer = self.tokenizer
        n.layer_idx = self.layer_idx
        return n

    def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
        return self.patcher.add_patches(patches, strength_patch, strength_model)

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx

    def tokenize(self, text, return_word_ids=False):
        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

    def encode_from_tokens(self, tokens, return_pooled=False):
        if self.layer_idx is not None:
            self.cond_stage_model.clip_layer(self.layer_idx)
        else:
            self.cond_stage_model.reset_clip_layer()

        self.load_model()
        cond, pooled = self.cond_stage_model.encode_token_weights(tokens)
        if return_pooled:
            return cond, pooled
        return cond
    def encode(self, text):
        tokens = self.tokenize(text)
        return self.encode_from_tokens(tokens)

    def load_sd(self, sd):
        return self.cond_stage_model.load_sd(sd)

    def get_sd(self):
        return self.cond_stage_model.state_dict()

    def load_model(self):
        model_management.load_model_gpu(self.patcher)
        return self.patcher

    def get_key_patches(self):
        return self.patcher.get_key_patches()
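
# Illustrative usage sketch (not part of the upstream module). Given a CLIP instance returned
# by load_checkpoint_guess_config() or load_clip(), text is encoded in two steps, and the
# pooled output can be requested for models that need it (e.g. SDXL):
#
#   tokens = clip.tokenize("a photo of a cat")
#   cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)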

class VAE:
    def __init__(self, sd=None, device=None, config=None, dtype=None):
        if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
            sd = diffusers_convert.convert_vae_state_dict(sd)

        self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower)
        self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype)
        self.downscale_ratio = 8
        self.latent_channels = 4
        self.process_input = lambda image: image * 2.0 - 1.0
        self.process_output = lambda image: torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)

        if config is None:
            if "decoder.mid.block_1.mix_factor" in sd:
                encoder_config = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
                decoder_config = encoder_config.copy()
                decoder_config["video_kernel_size"] = [3, 1, 1]
                decoder_config["alpha"] = 0.0
                self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"},
                                                            encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': encoder_config},
                                                            decoder_config={'target': "comfy.ldm.modules.temporal_ae.VideoDecoder", 'params': decoder_config})
            elif "taesd_decoder.1.weight" in sd:
                self.first_stage_model = comfy.taesd.taesd.TAESD()
            elif "vquantizer.codebook.weight" in sd: #VQGan: stage a of stable cascade
                self.first_stage_model = StageA()
                self.downscale_ratio = 4
                #TODO
                #self.memory_used_encode
                #self.memory_used_decode
                self.process_input = lambda image: image
                self.process_output = lambda image: image
            else:
                #default SD1.x/SD2.x VAE parameters
                ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}

                if 'encoder.down.2.downsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE
                    ddconfig['ch_mult'] = [1, 2, 4]
                    self.downscale_ratio = 4

                self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
        else:
            self.first_stage_model = AutoencoderKL(**(config['params']))
        self.first_stage_model = self.first_stage_model.eval()

        m, u = self.first_stage_model.load_state_dict(sd, strict=False)
        if len(m) > 0:
            print("Missing VAE keys", m)

        if len(u) > 0:
            print("Leftover VAE keys", u)

        if device is None:
            device = model_management.vae_device()
        self.device = device
        offload_device = model_management.vae_offload_device()
        if dtype is None:
            dtype = model_management.vae_dtype()
        self.vae_dtype = dtype
        self.first_stage_model.to(self.vae_dtype)
        self.output_device = model_management.intermediate_device()

        self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device)

    def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
        steps = samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap)
        steps += samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = comfy.utils.ProgressBar(steps)

        decode_fn = lambda a: self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)).float()
        # Decode with three different tile aspect ratios and average the results to reduce visible seams.
        output = self.process_output(
            (comfy.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) +
             comfy.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) +
             comfy.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar))
            / 3.0)
        return output

    def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        steps = pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
        steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
        steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
        pbar = comfy.utils.ProgressBar(steps)

        encode_fn = lambda a: self.first_stage_model.encode((self.process_input(a)).to(self.vae_dtype).to(self.device)).float()
        # Same averaging trick as decode_tiled_: encode with three tile aspect ratios and average.
        samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
        samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
        samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
        samples /= 3.0
        return samples

    def decode(self, samples_in):
        try:
            memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype)
            model_management.load_models_gpu([self.patcher], memory_required=memory_used)
            free_memory = model_management.get_free_memory(self.device)
            # Decode in sub-batches sized to the available memory; fall back to tiled decoding on OOM.
            batch_number = int(free_memory / memory_used)
            batch_number = max(1, batch_number)

            pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * self.downscale_ratio), round(samples_in.shape[3] * self.downscale_ratio)), device=self.output_device)
            for x in range(0, samples_in.shape[0], batch_number):
                samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
                pixel_samples[x:x+batch_number] = self.process_output(self.first_stage_model.decode(samples).to(self.output_device).float())
        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
            pixel_samples = self.decode_tiled_(samples_in)

        pixel_samples = pixel_samples.to(self.output_device).movedim(1,-1)
        return pixel_samples

    def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16):
        model_management.load_model_gpu(self.patcher)
        output = self.decode_tiled_(samples, tile_x, tile_y, overlap)
        return output.movedim(1,-1)

    def encode(self, pixel_samples):
        pixel_samples = pixel_samples.movedim(-1,1)
        try:
            memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype)
            model_management.load_models_gpu([self.patcher], memory_required=memory_used)
            free_memory = model_management.get_free_memory(self.device)
            batch_number = int(free_memory / memory_used)
            batch_number = max(1, batch_number)
            samples = torch.empty((pixel_samples.shape[0], self.latent_channels, round(pixel_samples.shape[2] // self.downscale_ratio), round(pixel_samples.shape[3] // self.downscale_ratio)), device=self.output_device)
            for x in range(0, pixel_samples.shape[0], batch_number):
                pixels_in = self.process_input(pixel_samples[x:x+batch_number]).to(self.vae_dtype).to(self.device)
                samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float()

        except model_management.OOM_EXCEPTION as e:
            print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
            samples = self.encode_tiled_(pixel_samples)

        return samples

    def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
        model_management.load_model_gpu(self.patcher)
        pixel_samples = pixel_samples.movedim(-1,1)
        samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap)
        return samples
    def get_sd(self):
        return self.first_stage_model.state_dict()
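
# Illustrative usage sketch (not part of the upstream module). encode() expects images as
# [batch, height, width, channels] float tensors in the 0..1 range and returns latents of
# shape [batch, latent_channels, height // downscale_ratio, width // downscale_ratio];
# decode() is the inverse and clamps the output back to 0..1:
#
#   latent = vae.encode(image)
#   decoded = vae.decode(latent)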

class StyleModel:
    def __init__(self, model, device="cpu"):
        self.model = model

    def get_cond(self, input):
        return self.model(input.last_hidden_state)


def load_style_model(ckpt_path):
    model_data = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
    keys = model_data.keys()
    if "style_embedding" in keys:
        model = comfy.t2i_adapter.adapter.StyleAdapter(width=1024, context_dim=768, num_head=8, n_layes=3, num_token=8)
    else:
        raise Exception("invalid style model {}".format(ckpt_path))
    model.load_state_dict(model_data)
    return StyleModel(model)


def load_clip(ckpt_paths, embedding_directory=None):
    clip_data = []
    for p in ckpt_paths:
        clip_data.append(comfy.utils.load_torch_file(p, safe_load=True))

    class EmptyClass:
        pass

    for i in range(len(clip_data)):
        if "transformer.resblocks.0.ln_1.weight" in clip_data[i]:
            clip_data[i] = comfy.utils.transformers_convert(clip_data[i], "", "text_model.", 32)

    clip_target = EmptyClass()
    clip_target.params = {}
    # Pick the text encoder implementation based on which layers are present in the state dict.
    if len(clip_data) == 1:
        if "text_model.encoder.layers.30.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sdxl_clip.SDXLRefinerClipModel
            clip_target.tokenizer = sdxl_clip.SDXLTokenizer
        elif "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data[0]:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
    else:
        clip_target.clip = sdxl_clip.SDXLClipModel
        clip_target.tokenizer = sdxl_clip.SDXLTokenizer

    clip = CLIP(clip_target, embedding_directory=embedding_directory)
    for c in clip_data:
        m, u = clip.load_sd(c)
        if len(m) > 0:
            print("clip missing:", m)

        if len(u) > 0:
            print("clip unexpected:", u)
    return clip
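
# Illustrative usage sketch (not part of the upstream module; the file names are hypothetical).
# A single text encoder checkpoint is matched to the SD1.x/SD2.x/SDXL-refiner variants by
# inspecting its keys, while more than one file is treated as the SDXL clip pair:
#
#   clip = load_clip(["clip_l.safetensors", "clip_g.safetensors"], embedding_directory="embeddings")
#   cond = clip.encode("a photo of a cat")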
def load_gligen(ckpt_path):
    data = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)
    if model_management.should_use_fp16():
        model = model.half()
    return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())
def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config.pop("use_fp16")
                if fp16:
                    unet_config["dtype"] = torch.float16

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    model_type = model_base.ModelType.EPS

    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            model_type = model_base.ModelType.V_PREDICTION

    clip = None
    vae = None

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = comfy.utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = comfy.supported_models_base.BASE({})

    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)
    model_config.unet_config = model_detection.convert_config(unet_config)

    if config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
    else:
        model = model_base.BaseModel(model_config, model_type=model_type)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model.set_inpaint()

    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)
    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        vae_sd = comfy.utils.state_dict_prefix_replace(state_dict, {"first_stage_model.": ""}, filter_keys=True)
        vae = VAE(sd=vae_sd, config=vae_config)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
            clip = CLIP(clip_target, embedding_directory=embedding_directory)
            w.cond_stage_model = clip.cond_stage_model.clip_h
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
            clip = CLIP(clip_target, embedding_directory=embedding_directory)
            w.cond_stage_model = clip.cond_stage_model.clip_l
        load_clip_weights(w, state_dict)

    return (comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)
def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
    sd = comfy.utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    model = None
    model_patcher = None
    clip_target = None

    parameters = comfy.utils.calculate_parameters(sd, "model.diffusion_model.")
    unet_dtype = model_management.unet_dtype(model_params=parameters)
    load_device = model_management.get_torch_device()
    manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device)

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", unet_dtype)
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path))

    model_config.set_manual_cast(manual_cast_dtype)

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    if output_model:
        inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
        offload_device = model_management.unet_offload_device()
        model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device)
        model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae_sd = comfy.utils.state_dict_prefix_replace(sd, {k: "" for k in model_config.vae_key_prefix}, filter_keys=True)
        vae_sd = model_config.process_vae_state_dict(vae_sd)
        vae = VAE(sd=vae_sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        if clip_target is not None:
            sd = model_config.process_clip_state_dict(sd)
            if any(k.startswith('cond_stage_model.') for k in sd):
                clip = CLIP(clip_target, embedding_directory=embedding_directory)
                w.cond_stage_model = clip.cond_stage_model
                load_model_weights(w, sd)
            else:
                print("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    if output_model:
        model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
        if inital_load_device != torch.device("cpu"):
            print("loaded straight to GPU")
            model_management.load_model_gpu(model_patcher)

    return (model_patcher, clip, vae, clipvision)
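
# Illustrative usage sketch (not part of the upstream module; the checkpoint path is
# hypothetical). This is the entry point used by ComfyUI's checkpoint loader nodes:
#
#   model_patcher, clip, vae, clipvision = load_checkpoint_guess_config(
#       "v1-5-pruned-emaonly.safetensors", embedding_directory="embeddings")
#   cond = clip.encode("a photo of a cat")
#   latent = vae.encode(image)  # image: [batch, height, width, channels] tensor in 0..1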
def load_unet_state_dict(sd): #load a standalone unet state dict (ldm or diffusers layout)
    parameters = comfy.utils.calculate_parameters(sd)
    unet_dtype = model_management.unet_dtype(model_params=parameters)
    load_device = model_management.get_torch_device()
    manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device)

    if "input_blocks.0.0.weight" in sd: #ldm
        model_config = model_detection.model_config_from_unet(sd, "", unet_dtype)
        if model_config is None:
            return None
        new_sd = sd

    else: #diffusers
        model_config = model_detection.model_config_from_diffusers_unet(sd, unet_dtype)
        if model_config is None:
            return None

        diffusers_keys = comfy.utils.unet_to_diffusers(model_config.unet_config)

        new_sd = {}
        for k in diffusers_keys:
            if k in sd:
                new_sd[diffusers_keys[k]] = sd.pop(k)
            else:
                print(diffusers_keys[k], k)
    offload_device = model_management.unet_offload_device()
    model_config.set_manual_cast(manual_cast_dtype)
    model = model_config.get_model(new_sd, "")
    model = model.to(offload_device)
    model.load_model_weights(new_sd, "")
    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys in unet:", left_over)
    return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device)
def load_unet(unet_path):
    sd = comfy.utils.load_torch_file(unet_path)
    model = load_unet_state_dict(sd)
    if model is None:
        print("ERROR UNSUPPORTED UNET", unet_path)
        raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
    return model
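
# Illustrative usage sketch (not part of the upstream module; the path is hypothetical).
# load_unet() accepts a standalone UNet in either ldm or diffusers layout and returns only a
# ModelPatcher, so the VAE and text encoder have to be loaded separately:
#
#   model_patcher = load_unet("diffusion_pytorch_model.safetensors")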

def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None):
    clip_sd = None
    load_models = [model]
    if clip is not None:
        load_models.append(clip.load_model())
        clip_sd = clip.get_sd()

    model_management.load_models_gpu(load_models)
    clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None
    sd = model.model.state_dict_for_saving(clip_sd, vae.get_sd(), clip_vision_sd)
    comfy.utils.save_torch_file(sd, output_path, metadata=metadata)
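
# Illustrative usage sketch (not part of the upstream module; the output path and metadata
# values are hypothetical). save_checkpoint() merges the diffusion model, CLIP and VAE weights
# back into a single checkpoint file:
#
#   save_checkpoint("merged_checkpoint.safetensors", model_patcher, clip=clip, vae=vae,
#                   metadata={"note": "merged with comfy.sd.save_checkpoint"})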