import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time

from PIL import Image, ImageOps
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_convert
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )
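
# Illustrative sketch (not part of the original file): CLIPTextEncode returns
# CONDITIONING as a list of [cond_tensor, options_dict] pairs, and every
# conditioning node below iterates over that structure, e.g.:
def _example_iter_conditioning(conditioning):
    # yields each cond tensor with its (possibly empty) options dict
    for cond, options in conditioning:
        yield cond, options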

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)
        return (out, )
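
# Illustrative sketch (not part of the original file): addWeighted is a plain
# linear interpolation, tw = s * t1 + (1 - s) * t0, where a shorter t0 is
# zero-padded along the token axis first:
def _example_lerp_conds(t1, t0, s):
    if t0.shape[1] < t1.shape[1]:
        t0 = torch.cat([t0, torch.zeros((1, t1.shape[1] - t0.shape[1], t1.shape[2]))], dim=1)
    return t1 * s + t0 * (1.0 - s)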

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )
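
# Illustrative sketch (not part of the original file): 'area' is stored in
# latent units (pixel sizes divided by 8) in (height, width, y, x) order,
# so a 256x128 pixel region at pixel offset (64, 0) becomes:
def _example_area_tuple(width=256, height=128, x=64, y=0):
    return (height // 8, width // 8, y // 8, x // 8)  # (16, 32, 0, 8)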

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )
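
# Illustrative sketch (not part of the original file): vae_encode_crop_pixels
# center-crops each spatial dimension down to a multiple of 8, e.g. an input
# of 517 pixels is cropped to 512 starting at offset 2:
def _example_crop_to_multiple_of_8(size=517):
    cropped = (size // 8) * 8
    offset = (size % 8) // 2
    return cropped, offset  # (512, 2)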

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
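
# Illustrative sketch (not part of the original file): the channel loop above
# remaps masked pixels toward 0.5 (neutral gray) before encoding, i.e. per
# channel p' = (p - 0.5) * (1 - round(mask)) + 0.5:
def _example_neutralize_masked_pixels(pixels, mask):
    # pixels: (B, H, W, C) floats in [0, 1]; mask: (B, 1, H, W) binary
    m = (1.0 - mask.round()).squeeze(1)
    return (pixels - 0.5) * m.unsqueeze(-1) + 0.5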

class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = {"prompt": prompt_info}
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"
        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]

        safetensors.torch.save_file(output, file, metadata=metadata)

        return {}


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        samples = {"samples": latent["latent_tensor"].float()}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True
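
# Illustrative sketch (not part of the original file): a .latent file written
# by SaveLatent is a plain safetensors file, so it can also be inspected
# outside ComfyUI (the path below is hypothetical):
def _example_read_latent(path="output/latents/ComfyUI_00001_.latent"):
    tensors = safetensors.torch.load_file(path, device="cpu")
    return tensors["latent_tensor"].float()  # shape (B, 4, H // 8, W // 8)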

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)
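
# Illustrative note (not part of the original file): stop_at_clip_layer counts
# from the end of the CLIP text model, so -1 keeps every layer and -2 matches
# what other UIs commonly call "CLIP skip 2"; cloning keeps the input intact:
def _example_clip_skip(clip, layers_from_end=-2):
    clipped = clip.clone()
    clipped.clip_layer(layers_from_end)
    return clipped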

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )
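
# Illustrative sketch (not part of the original file): latents are 8x smaller
# spatially than the image with 4 channels, so 512x512 maps to (1, 4, 64, 64):
def _example_latent_shape(width=512, height=512, batch_size=1):
    return (batch_size, 4, height // 8, width // 8)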

class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index + length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)

class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]

        s["samples"] = s_in.repeat((amount, 1, 1, 1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            # repeat the padded masks so the mask batch matches the repeated samples
            s["noise_mask"] = masks.repeat((amount, 1, 1, 1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)
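
# Illustrative sketch (not part of the original file): repeating a batch also
# extends "batch_index" so each copy samples distinct noise; e.g. indices
# (0, 1) repeated 3x become [0, 1, 2, 3, 4, 5]:
def _example_repeat_batch_index(batch_index=(0, 1), amount=3):
    offset = max(batch_index) - min(batch_index) + 1
    return list(batch_index) + [x + (i * offset) for i in range(1, amount) for x in batch_index]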

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))
                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
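
# Illustrative sketch (not part of the original file): with feathering, the
# t-th row/column in from a pasted edge is weighted (t + 1) / feather, so
# feather=4 (in latent units) yields the ramp [0.25, 0.5, 0.75, 1.0]:
def _example_feather_ramp(feather=4):
    return [(1.0 / feather) * (t + 1) for t in range(feather)]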

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        pbar.update_absolute(step + 1, total_steps)

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)
    out = latent.copy()
    out["samples"] = samples
    return (out, )
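
# Illustrative sketch (not part of the original file): the noise is generated
# on the CPU from the seed, which keeps results reproducible across devices;
# a minimal stand-in for prepare_noise (ignoring batch_inds) would be:
def _example_seeded_noise(latent_image, seed):
    generator = torch.manual_seed(seed)
    return torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout,
                       generator=generator, device="cpu")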

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }
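
# Illustrative sketch (not part of the original file): IMAGE tensors are float
# (H, W, C) values in [0, 1], so the save path above rescales to 8-bit like:
def _example_tensor_to_pil(image_tensor):
    i = 255. * image_tensor.cpu().numpy()
    return Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))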

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True
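
# Illustrative sketch (not part of the original file): MASK values are the
# inverse of alpha (1.0 = fully masked where the image is transparent),
# mirroring the alpha handling in load_image above:
def _example_alpha_to_mask(pil_image):
    if 'A' in pil_image.getbands():
        alpha = np.array(pil_image.getchannel('A')).astype(np.float32) / 255.0
        return 1. - torch.from_numpy(alpha)
    return torch.zeros((64, 64), dtype=torch.float32)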

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), ),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


Guo Y.K's avatar
Guo Y.K committed
1192
1193
1194
1195
1196
1197
1198
class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)
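
# Feathering sketch: inside the copied image region the mask ramps
# quadratically from 0 (keep) toward 1 (regenerate) over `feathering` pixels
# next to each padded edge. E.g. with feathering=40, a pixel 10 px from a
# padded edge gets v = (40 - 10) / 40 = 0.75, so t[i, j] = 0.75**2 = 0.5625.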


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentFromBatch": "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}
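
# Custom node packages are expected to export the same two dicts defined
# above. A minimal sketch of such a module (node name and class are
# hypothetical):
#
#   NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
#   NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node"}
#
# load_custom_node() below merges these into the global mappings.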

def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

def load_custom_nodes():
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

def init_custom_nodes():
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_rebatch.py"))
    load_custom_nodes()
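
# Startup order: init_custom_nodes() first loads the bundled comfy_extras
# modules through the same load_custom_node() path, then load_custom_nodes()
# scans every "custom_nodes" folder, skipping __pycache__ and ".disabled"
# entries, and prints per-module import times (slowest last).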