import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_convert
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths


def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)


MAX_RESOLUTION=8192

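# Every node class below follows the same convention: INPUT_TYPES() declares the inputs
# and widgets, RETURN_TYPES and FUNCTION name the output socket types and the method the
# executor calls, and CATEGORY places the node in the UI's add-node menu.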
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

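        # Blend each conditioning_to tensor toward cond_from token by token; cond_from is
        # truncated or zero-padded so the token dimensions line up before mixing.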
        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)
        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
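        # The loop below pushes masked pixels to 0.5 (neutral gray) so the VAE does not
        # encode the original content in the region that will be repainted.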
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )


class SaveLatent:
    def __init__(self):
        self.output_dir = os.path.join(folder_paths.get_input_directory(), "latents")
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        def map_filename(filename):
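            # Parse names of the form "<prefix>_<NNNNN>_" into (counter, prefix) so saving
            # can continue from the highest existing counter.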
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving latent outside the 'input/latents' folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = {"workflow": prompt_info}
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"
        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]

        safetensors.torch.save_file(output, file, metadata=metadata)

        return {}


class LoadLatent:
    input_dir = os.path.join(folder_paths.get_input_directory(), "latents")

    @classmethod
    def INPUT_TYPES(s):
        files = [f for f in os.listdir(s.input_dir) if os.path.isfile(os.path.join(s.input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        file = folder_paths.get_annotated_filepath(latent, self.input_dir)

        latent = safetensors.torch.load_file(file, device="cpu")
        samples = {"samples": latent["latent_tensor"]}

        return (samples, )


class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
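        # Clone so the caller's CLIP is untouched, then pick which layer output to stop at
        # (-1 = final layer, -2 = skip the last layer, i.e. "CLIP skip").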
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
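        # Assumption: set_model_tomesd enables Token Merging (ToMe) on the cloned model;
        # a higher ratio merges more attention tokens, trading quality for speed.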
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
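        # Each GLIGEN box is (pooled text embedding, height, width, y, x) in latent (1/8)
        # coordinates; boxes from earlier GLIGEN nodes are carried along via prev.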
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
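        # Clamp the requested window to the existing batch, then slice the samples, any
        # noise mask, and the batch_index list consistently.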
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
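        # Repeat the whole latent batch 'amount' times; batch_index values are offset per
        # copy so each repeat can later be given distinct noise.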
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
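            # Build a blend mask that ramps linearly from 0 to 1 over 'feather' latent pixels
            # along each edge of the pasted region that does not touch the destination border.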
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x =  x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        # Seed the noise per batch_index so batches that were split or repeated stay reproducible.
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    # Report per-step progress back to the UI.
    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        pbar.update_absolute(step + 1, total_steps)

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
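        # Use the alpha channel (inverted) as the mask when present; otherwise return an
        # all-zero 64x64 placeholder mask.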
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), ),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
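        # Images are batched [B, H, W, C] tensors; common_upscale works on
        # [B, C, H, W], so the channel dimension is moved before and after scaling.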
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
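        # Pad the [batch, height, width, channels] image with zeros on each side and
        # build a matching mask: 1.0 over the newly added border, 0.0 (or a feathered
        # ramp) over the original image area.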
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
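            # Feather the mask edge: within `feathering` pixels of a padded side the
            # mask ramps quadratically from 0 (inside the image) up to 1 at the border.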

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


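# Maps the node type identifiers stored in saved workflows to the classes that
# implement them; custom node packages extend this table at import time.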
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage ": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent
}

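# Human-readable display names shown in the UI; keys mirror NODE_CLASS_MAPPINGS.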
NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentComposite": "Latent Composite",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

def load_custom_node(module_path):
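    # Import a single custom node module (a standalone .py file or a package
    # directory containing __init__.py) and merge its NODE_CLASS_MAPPINGS and
    # optional NODE_DISPLAY_NAME_MAPPINGS into the global tables above.
    # A minimal module is expected to look roughly like this (illustrative
    # sketch only; the node name "ExampleNode" is hypothetical):
    #
    #     class ExampleNode:
    #         @classmethod
    #         def INPUT_TYPES(s):
    #             return {"required": {"image": ("IMAGE",)}}
    #         RETURN_TYPES = ("IMAGE",)
    #         FUNCTION = "run"
    #         CATEGORY = "example"
    #         def run(self, image):
    #             return (image,)
    #
    #     NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
    #     NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node"}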
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

def load_custom_nodes():
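    # Walk every configured custom_nodes directory, import each module or package
    # (skipping __pycache__, non-.py files and anything ending in ".disabled"),
    # and record per-module import times for the summary printed below.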
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

def init_custom_nodes():
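    # Load the extra node packs bundled in comfy_extras first, then any
    # user-installed custom nodes.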
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_rebatch.py"))
    load_custom_nodes()