import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random
import logging

from PIL import Image, ImageOps, ImageSequence
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))

import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet

import comfy.clip_vision

import comfy.model_management
from comfy.cli_args import args

import importlib

import folder_paths
import latent_preview
import node_helpers

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=16384

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )

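# Note: a CONDITIONING value, as produced by CLIPTextEncode above, is a list of
# [cond_tensor, options_dict] pairs, e.g. [[cond, {"pooled_output": pooled}]].
# The nodes below combine, weight, concatenate or annotate these pairs; the extra
# dict keys they set (area, mask, strength, control, ...) are consumed downstream.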
class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage :
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )

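# ConditioningConcat appends conditioning_from after conditioning_to along the
# token dimension (dim 1), instead of interpolating like ConditioningAverage.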
class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"area": (height // 8, width // 8, y // 8, x // 8),
                                                                "strength": strength,
                                                                "set_area_to_bounds": False})
        return (c, )

class ConditioningSetAreaPercentage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"area": ("percentage", height, width, y, x),
                                                                "strength": strength,
                                                                "set_area_to_bounds": False})
        return (c, )

class ConditioningSetAreaStrength:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"strength": strength})
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)

        c = node_helpers.conditioning_set_values(conditioning, {"mask": mask,
                                                                "set_area_to_bounds": set_area_to_bounds,
                                                                "mask_strength": strength})
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = []
        for t in conditioning:
            d = t[1].copy()
            d['start_percent'] = start
            d['end_percent'] = end
            n = [t[0], d]
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        return (vae.decode_tiled(samples["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, )
        return ({"samples":t}, )

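# VAEEncodeForInpaint grows the inpaint mask slightly and neutralizes the masked
# pixels (sets them to 0.5 grey) before encoding, so the latent under the mask
# carries no leftover image detail; the grown mask is returned as "noise_mask".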
class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // vae.downscale_ratio) * vae.downscale_ratio
        y = (pixels.shape[2] // vae.downscale_ratio) * vae.downscale_ratio
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % vae.downscale_ratio) // 2
            y_offset = (pixels.shape[2] % vae.downscale_ratio) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

class InpaintModelConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}

        out_latent["samples"] = orig_latent
        out_latent["noise_mask"] = mask

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()
                d["concat_latent_image"] = concat_latent
                d["concat_mask"] = mask
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1], out_latent)


class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return { "ui": { "latents": results } }


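# LoadLatent reads a .latent file saved by SaveLatent. Files written before the
# "latent_format_version_0" marker existed are rescaled by 1/0.18215 (the SD1
# latent scaling constant) on load to match the current latent format.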
class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out[:3]

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

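# LoraLoader keeps the most recently used LoRA in memory (keyed by file path in
# self.loaded_lora) so re-running a workflow with the same LoRA does not reload
# the file from disk; selecting a different LoRA drops the cached one first.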
class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)

class LoraLoaderModelOnly(LoraLoader):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)

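# VAELoader lists regular VAE checkpoints plus the lightweight "taesd"/"taesdxl"
# approximate VAEs, which are only offered when both their encoder and decoder
# files are present in the vae_approx folder.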
class VAELoader:
    @staticmethod
    def vae_list():
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        return vaes

    @staticmethod
    def load_taesd(name):
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
        return sd

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        if vae_name in ["taesd", "taesdxl"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )

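# ControlNetApplyAdvanced applies one ControlNet to both positive and negative
# conditioning; the cnets dict reuses a single configured copy per previous
# controlnet so both sides share it, and start_percent/end_percent limit the
# portion of the sampling schedule over which the hint is active.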
class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent))
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])


class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                              "type": (["stable_diffusion", "stable_cascade"], ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name, type="stable_diffusion"):
        clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
        if type == "stable_cascade":
            clip_type = comfy.sd.CLIPType.STABLE_CASCADE

        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled="unprojected")
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

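# EmptyLatentImage allocates an all-zero latent batch in the standard SD layout:
# 4 channels at 1/8 of the requested pixel width and height.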
class EmptyLatentImage:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )

class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = samples
        else:
            s = samples.copy()

            if width == 0:
                height = max(64, height)
                width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            elif height == 0:
                width = max(64, width)
                height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            else:
                width = max(64, width)
                height = max(64, height)

            s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

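# LatentComposite pastes samples_from into samples_to at (x, y) in latent space
# (pixel coordinates divided by 8). A non-zero feather builds a linear ramp mask
# along the pasted edges so the two latents are blended instead of hard-cut.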
class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

1239
    def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"):
1240

1241
1242
1243
        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]
1244

1245
1246
1247
1248
        if samples1.shape != samples2.shape:
            samples2.permute(0, 3, 1, 2)
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')
            samples2.permute(0, 2, 3, 1)
1249

1250
1251
        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
1252
1253
1254
1255
1256
1257
1258
1259
1260
        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

comfyanonymous's avatar
comfyanonymous committed
1261
1262
1263
1264
class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

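# Illustrative usage sketch (not part of the original node set; the helper name and the
# dummy latent are hypothetical). Pixel-space width/height/x/y are converted to latent
# cells by integer division by 8, and the origin is clamped so at least an 8x8-cell
# (64x64-pixel) window remains inside the latent.
def _example_latent_crop():
    latent = {"samples": torch.zeros((1, 4, 64, 64))}   # latent for a 512x512 image
    out, = LatentCrop().crop(latent, width=256, height=256, x=128, y=64)
    return out["samples"].shape  # torch.Size([1, 4, 32, 32])
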
class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

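# Illustrative sketch (not part of the original node set; the helper name and tensors
# are hypothetical). SetLatentNoiseMask only attaches the mask to the latent dict; the
# reshape accepts either a single [H, W] mask or a batched [B, H, W] mask and produces
# the [B, 1, H, W] layout the sampler consumes.
def _example_noise_mask_shapes():
    single = torch.ones((64, 64))
    batched = torch.ones((4, 64, 64))
    s1 = single.reshape((-1, 1, single.shape[-2], single.shape[-1]))     # [1, 1, 64, 64]
    s2 = batched.reshape((-1, 1, batched.shape[-2], batched.shape[-1]))  # [4, 1, 64, 64]
    return s1.shape, s2.shape
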
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

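# Descriptive notes on common_ksampler (added for documentation; behaviour as read from
# the code above, not an authoritative spec):
# - Noise is generated on the CPU so a given seed should reproduce the same latents
#   regardless of the GPU in use; the optional "batch_index" entry (set by
#   LatentFromBatch) keeps per-image noise stable when a batch is split and re-sampled.
# - An attached "noise_mask" limits denoising to the masked region (inpainting).
# - start_step/last_step sample only a slice of the schedule, which is how multi-pass
#   workflows hand partially denoised latents from one sampler node to the next;
#   force_full_denoise decides whether leftover noise is removed at last_step.
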
class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for (batch_number, image) in enumerate(images):
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

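# Illustrative sketch (not part of the original node set; the helper name and path are
# hypothetical). SaveImage embeds the prompt graph and any extra_pnginfo entries
# (typically a "workflow" key) as PNG text chunks, so a saved output can be reopened
# and the generating workflow recovered.
def _example_read_saved_metadata(path="output/ComfyUI_00001_.png"):
    img = Image.open(path)
    prompt = json.loads(img.info["prompt"]) if "prompt" in img.info else None
    workflow = json.loads(img.info["workflow"]) if "workflow" in img.info else None
    return prompt, workflow
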
class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 1

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True})},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        img = Image.open(image_path)
        output_images = []
        output_masks = []
        for i in ImageSequence.Iterator(img):
            i = ImageOps.exif_transpose(i)
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            if 'A' in i.getbands():
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
            output_images.append(image)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return (output_image, output_mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

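# Illustrative sketch (not part of the original node set; the helper name and array are
# hypothetical). LoadImage returns images as float tensors in [0, 1] shaped
# [B, H, W, C], and the MASK output is the inverted alpha channel: opaque pixels map to
# 0.0 and fully transparent pixels to 1.0, which is the convention the inpainting and
# SetLatentNoiseMask nodes expect.
def _example_mask_convention():
    alpha = np.array([[255, 0], [128, 255]], dtype=np.float32) / 255.0
    mask = 1. - torch.from_numpy(alpha)
    return mask  # approximately [[0.0, 1.0], [0.5, 0.0]]
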
class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True}),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask.unsqueeze(0),)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = image
        else:
            samples = image.movedim(-1,1)

            if width == 0:
                width = max(1, round(samples.shape[3] * height / samples.shape[2]))
            elif height == 0:
                height = max(1, round(samples.shape[2] * width / samples.shape[3]))

            s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
            s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)

class ImageBatch:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "batch"

    CATEGORY = "image"

    def batch(self, image1, image2):
        if image1.shape[1:] != image2.shape[1:]:
            image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
        s = torch.cat((image1, image2), dim=0)
        return (s,)

class EmptyImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                              "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "generate"

    CATEGORY = "image"

    def generate(self, width, height, batch_size=1, color=0):
        r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
        g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
        b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
        return (torch.cat((r, g, b), dim=-1), )

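# Illustrative sketch (not part of the original node set; the helper name is
# hypothetical). The "color" input is a packed 0xRRGGBB integer; generate() unpacks it
# into per-channel floats in [0, 1].
def _example_empty_image_color():
    out, = EmptyImage().generate(width=4, height=2, batch_size=1, color=0x3366CC)
    # 0x33/0xFF = 0.2, 0x66/0xFF = 0.4, 0xCC/0xFF = 0.8 for R, G, B respectively.
    return out.shape, out[0, 0, 0]  # (torch.Size([1, 2, 4, 3]), tensor([0.2, 0.4, 0.8]))
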
class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.ones(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        ) * 0.5

        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)

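# Illustrative sketch (not part of the original node set; the helper name is
# hypothetical). The returned mask is 1.0 over the new padding and, inside the original
# image, falls off quadratically toward the interior: a pixel at distance d < feathering
# from a padded edge gets ((feathering - d) / feathering) ** 2, reaching 0.0 once
# d >= feathering.
def _example_outpaint_feather_value(d, feathering=40):
    if d >= feathering:
        return 0.0
    v = (feathering - d) / feathering
    return v * v  # e.g. d=0 -> 1.0, d=20 -> 0.25, d=39 -> 0.000625
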
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImageBatch": ImageBatch,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "EmptyImage": EmptyImage,
    "ConditioningAverage": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
    "ConditioningSetAreaStrength": ConditioningSetAreaStrength,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentBlend": LatentBlend,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,
    "InpaintModelConditioning": InpaintModelConditioning,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
    "LoraLoaderModelOnly": LoraLoaderModelOnly,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentBlend": "Latent Blend",
    "LatentFromBatch": "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

EXTENSION_WEB_DIRS = {}

def load_custom_node(module_path, ignore=set()):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        logging.debug("Trying to load custom node {}".format(module_path))
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            module_dir = os.path.split(module_path)[0]
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module_dir = module_path

        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)

        if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
            web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
            if os.path.isdir(web_dir):
                EXTENSION_WEB_DIRS[module_name] = web_dir

        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            for name in module.NODE_CLASS_MAPPINGS:
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        logging.warning(traceback.format_exc())
        logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
        return False
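
# Illustrative sketch (not part of this file's API): the minimal contract a custom node
# package has to satisfy for load_custom_node() to accept it. A file or package placed
# under custom_nodes/ only needs to expose NODE_CLASS_MAPPINGS (and, optionally,
# NODE_DISPLAY_NAME_MAPPINGS and WEB_DIRECTORY). The module and class names below are
# hypothetical:
#
#     # custom_nodes/example_node.py
#     class ExampleInvert:
#         @classmethod
#         def INPUT_TYPES(s):
#             return {"required": {"image": ("IMAGE",)}}
#         RETURN_TYPES = ("IMAGE",)
#         FUNCTION = "run"
#         CATEGORY = "example"
#
#         def run(self, image):
#             return (1.0 - image,)
#
#     NODE_CLASS_MAPPINGS = {"ExampleInvert": ExampleInvert}
#     NODE_DISPLAY_NAME_MAPPINGS = {"ExampleInvert": "Example Invert"}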

def load_custom_nodes():
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(os.path.realpath(custom_node_path))
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        logging.info("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
        logging.info("")

def init_custom_nodes():
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    extras_files = [
        "nodes_latent.py",
        "nodes_hypernetwork.py",
        "nodes_upscale_model.py",
        "nodes_post_processing.py",
        "nodes_mask.py",
        "nodes_compositing.py",
        "nodes_rebatch.py",
        "nodes_model_merging.py",
        "nodes_tomesd.py",
        "nodes_clip_sdxl.py",
        "nodes_canny.py",
        "nodes_freelunch.py",
        "nodes_custom_sampler.py",
        "nodes_hypertile.py",
        "nodes_model_advanced.py",
        "nodes_model_downscale.py",
        "nodes_images.py",
        "nodes_video_model.py",
        "nodes_sag.py",
        "nodes_perpneg.py",
        "nodes_stable3d.py",
        "nodes_sdupscale.py",
        "nodes_photomaker.py",
        "nodes_cond.py",
        "nodes_morphology.py",
        "nodes_stable_cascade.py",
        "nodes_differential_diffusion.py",
        "nodes_ip2p.py",
    ]

    import_failed = []
    for node_file in extras_files:
        if not load_custom_node(os.path.join(extras_dir, node_file)):
            import_failed.append(node_file)

    load_custom_nodes()

    if len(import_failed) > 0:
        logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
        for node in import_failed:
            logging.warning("IMPORT FAILED: {}".format(node))
        logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
        if args.windows_standalone_build:
            logging.warning("Please run the update script: update/update_comfyui.bat")
        else:
            logging.warning("Please do a: pip install -r requirements.txt")
        logging.warning("")