import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random

from PIL import Image, ImageOps, ImageSequence
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet

import comfy.clip_vision

import comfy.model_management
from comfy.cli_args import args

import importlib

import folder_paths
import latent_preview

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

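# CLIPTextEncode tokenizes the prompt with the supplied CLIP model and returns the
# encoded conditioning together with its pooled output.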
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

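# ConditioningAverage blends conditioning_from into conditioning_to:
# result = to * conditioning_to_strength + from * (1 - conditioning_to_strength),
# and pooled outputs are mixed the same way when both are present.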
class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )

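# ConditioningConcat appends the tokens of conditioning_from after those of
# conditioning_to along the sequence dimension.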
class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

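# ConditioningSetArea restricts conditioning to a pixel rectangle; the region is stored
# in latent units (pixel coordinates divided by 8).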
class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

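# ConditioningSetAreaPercentage is the same as ConditioningSetArea, but the region is
# given as fractions of the image size (0.0-1.0) instead of pixels.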
class ConditioningSetAreaPercentage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = ("percentage", height, width, y, x)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetAreaStrength:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['strength'] = strength
            c.append(n)
        return (c, )


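# ConditioningSetMask applies conditioning only where the mask is set; choosing
# "mask bounds" also shrinks the conditioning area to the mask's bounding box.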
class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

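# ConditioningSetTimestepRange limits a conditioning to part of the sampling schedule,
# expressed as fractions (0.0 = first step, 1.0 = last step).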
class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = []
        for t in conditioning:
            d = t[1].copy()
            d['start_percent'] = start
            d['end_percent'] = end
            n = [t[0], d]
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        return (vae.decode_tiled(samples["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

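    # Crop width/height down to the nearest multiple of 8 (centered) so the image maps
    # cleanly onto the 8x-downscaled latent grid.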
    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels
    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, )
        return ({"samples":t}, )

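# VAEEncodeForInpaint greys out (0.5) the masked pixels before encoding so the VAE does not
# leak the original content, optionally grows the mask by a few pixels for seamless blending,
# and attaches the grown mask as noise_mask.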
class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )


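# InpaintModelConditioning prepares conditioning for dedicated inpaint models: the image is
# encoded with the masked region greyed out and attached (with the mask) as concat inputs on
# both positive and negative conditioning, while the returned latent is the unmasked encode
# with noise_mask set.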
class InpaintModelConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}

        out_latent["samples"] = orig_latent
        out_latent["noise_mask"] = mask

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()
                d["concat_latent_image"] = concat_latent
                d["concat_mask"] = mask
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1], out_latent)


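# SaveLatent writes the latent tensor to a .latent safetensors file under the output
# directory, embedding the prompt and workflow as metadata unless metadata saving is disabled.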
class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return { "ui": { "latents": results } }


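# LoadLatent reads a .latent file back from the input directory; files saved before the
# latent_format_version_0 marker are rescaled by 1/0.18215.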
class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True


class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out[:3]

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

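# LoraLoader patches both the diffusion model and CLIP with a LoRA; the last loaded file is
# cached on the node so re-running with the same LoRA skips re-reading it from disk.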
class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)

class LoraLoaderModelOnly(LoraLoader):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)

class VAELoader:
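    # vae_list() only offers the "taesd"/"taesdxl" entries when both the encoder and decoder
    # approximation models are present in the vae_approx folder.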
    @staticmethod
    def vae_list():
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        return vaes

    @staticmethod
    def load_taesd(name):
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
        return sd

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        if vae_name in ["taesd", "taesdxl"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )


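# ControlNetApplyAdvanced hooks the ControlNet onto the positive and negative conditioning
# separately, with an active range given as start/end fractions of the sampling schedule.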
class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent))
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])


class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )
class EmptyLatentImage:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )


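# LatentFromBatch slices `length` latents (and the matching noise masks / batch indices) out
# of a batch, starting at batch_index.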
class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = samples
        else:
            s = samples.copy()

            if width == 0:
                height = max(64, height)
                width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            elif height == 0:
                width = max(64, width)
                height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            else:
                width = max(64, width)
                height = max(64, height)

            s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

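# LatentComposite pastes samples_from into samples_to at (x, y) (pixel coordinates divided
# by 8); a nonzero feather linearly fades the edges of the pasted region over feather // 8
# latent pixels.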
class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
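# LatentBlend mixes two latents as samples1 * blend_factor + samples2 * (1 - blend_factor);
# samples2 is rescaled with bicubic upscaling if the shapes differ.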
class LatentBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

    def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"):

        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]

        if samples1.shape != samples2.shape:
            # Latents are already [batch, channel, height, width], so samples2 can be
            # resized directly to samples1's spatial size.
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')

        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
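        # Linear interpolation between the two latents: blend_factor = 1.0 keeps
        # samples1 unchanged, 0.0 returns the blend_mode result (samples2 for "normal").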
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
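        # Reshape the mask to [batch, 1, height, width] so it broadcasts across
        # the latent channels during sampling.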
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
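    # Shared sampling helper for KSampler and KSamplerAdvanced: prepares the
    # initial noise (or zeros when noise is disabled), forwards an optional
    # noise mask, wires up the latent preview callback and progress bar, then
    # runs comfy.sample.sample() and returns the denoised latent.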
    latent_image = latent["samples"]
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
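        # Translate the UI toggles into common_ksampler flags: returning with
        # leftover noise skips the final full denoise, and disabling add_noise
        # starts sampling from the latent with zero initial noise.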
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
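            # Unless ComfyUI was started with --disable-metadata, the prompt and
            # any extra PNG info are embedded as text chunks so the workflow can
            # be restored by dragging the image back into the UI.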
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 1

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True})},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        img = Image.open(image_path)
        output_images = []
        output_masks = []
        for i in ImageSequence.Iterator(img):
            i = ImageOps.exif_transpose(i)
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
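            # An alpha channel, if present, becomes the mask; it is inverted so
            # opaque pixels are 0.0 and transparent pixels are 1.0. Frames without
            # alpha get an all-zero 64x64 placeholder mask.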
            if 'A' in i.getbands():
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
            output_images.append(image)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return (output_image, output_mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True}),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            i = i.convert("RGBA")
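        # Pull the requested channel out of the (now RGBA) image; alpha is
        # inverted so transparent areas read as 1.0. If the channel is somehow
        # missing, fall back to an empty 64x64 mask.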
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask.unsqueeze(0),)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
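        # A width or height of 0 keeps that dimension proportional to the other;
        # if both are 0 the image passes through unchanged. Tensors are moved to
        # channels-first for common_upscale and back afterwards.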
        if width == 0 and height == 0:
            s = image
        else:
            samples = image.movedim(-1,1)

            if width == 0:
                width = max(1, round(samples.shape[3] * height / samples.shape[2]))
            elif height == 0:
                height = max(1, round(samples.shape[2] * width / samples.shape[3]))

            s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
            s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)

class ImageBatch:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "batch"

    CATEGORY = "image"

    def batch(self, image1, image2):
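        # If the two images differ in spatial size, image2 is bilinearly resized
        # (center crop) to match image1 before concatenating along the batch axis.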
        if image1.shape[1:] != image2.shape[1:]:
            image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
        s = torch.cat((image1, image2), dim=0)
        return (s,)

class EmptyImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                              "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "generate"

    CATEGORY = "image"

    def generate(self, width, height, batch_size=1, color=0):
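        # Split the 0xRRGGBB integer into per-channel planes normalized to 0..1
        # and stack them into a [batch, height, width, 3] image tensor.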
        r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
        g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
        b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
        return (torch.cat((r, g, b), dim=-1), )

class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.ones(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        ) * 0.5

        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
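            # Quadratic feathering: for pixels within `feathering` of a padded
            # edge, the mask value is ((feathering - d) / feathering) ** 2, so the
            # original content fades out towards the newly added border.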

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImageBatch": ImageBatch,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "EmptyImage": EmptyImage,
    "ConditioningAverage": ConditioningAverage ,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
    "ConditioningSetAreaStrength": ConditioningSetAreaStrength,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentBlend": LatentBlend,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,
    "InpaintModelConditioning": InpaintModelConditioning,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
    "LoraLoaderModelOnly": LoraLoaderModelOnly,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentBlend": "Latent Blend",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

EXTENSION_WEB_DIRS = {}

def load_custom_node(module_path, ignore=set()):
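    # Import one custom node module (a single .py file or a package directory).
    # Modules that expose NODE_CLASS_MAPPINGS get merged into the global registry,
    # along with optional NODE_DISPLAY_NAME_MAPPINGS and a WEB_DIRECTORY for
    # frontend assets; names listed in `ignore` (the built-in nodes) are skipped.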
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            module_dir = os.path.split(module_path)[0]
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module_dir = module_path

        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)

        if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
            web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
            if os.path.isdir(web_dir):
                EXTENSION_WEB_DIRS[module_name] = web_dir

        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            for name in module.NODE_CLASS_MAPPINGS:
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

def load_custom_nodes():
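    # Scan every configured custom_nodes directory, import each module or package
    # found there, and record per-module import times for the startup summary.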
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(os.path.realpath(custom_node_path))
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

def init_custom_nodes():
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    extras_files = [
        "nodes_latent.py",
        "nodes_hypernetwork.py",
        "nodes_upscale_model.py",
        "nodes_post_processing.py",
        "nodes_mask.py",
        "nodes_compositing.py",
        "nodes_rebatch.py",
        "nodes_model_merging.py",
        "nodes_tomesd.py",
        "nodes_clip_sdxl.py",
        "nodes_canny.py",
        "nodes_freelunch.py",
        "nodes_custom_sampler.py",
        "nodes_hypertile.py",
        "nodes_model_advanced.py",
        "nodes_model_downscale.py",
        "nodes_images.py",
        "nodes_video_model.py",
        "nodes_sag.py",
        "nodes_perpneg.py",
        "nodes_stable3d.py",
        "nodes_sdupscale.py",
        "nodes_photomaker.py",
    ]

    for node_file in extras_files:
        load_custom_node(os.path.join(extras_dir, node_file))

    load_custom_nodes()