import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random

from PIL import Image, ImageOps, ImageSequence
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet

import comfy.clip_vision

import comfy.model_management
from comfy.cli_args import args

import importlib

import folder_paths
import latent_preview

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192
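
# Note on the CONDITIONING type used throughout this file: a CONDITIONING value is a
# list of [cond_tensor, options] pairs, e.g. [[cond, {"pooled_output": pooled}]].
# The options dict can carry extra keys such as "area", "strength", "mask", "control"
# or "gligen" that the nodes below and the samplers interpret.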

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
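            # Match sequence lengths: truncate cond_from to t1's token count, or zero-pad
            # it when it is shorter, before taking the weighted average below.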
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )

class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
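            # The area is stored in latent coordinates, so pixel values are divided by 8.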
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetAreaPercentage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = ("percentage", height, width, y, x)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetAreaStrength:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['strength'] = strength
            c.append(n)
        return (c, )


class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = []
        for t in conditioning:
            d = t[1].copy()
            d['start_percent'] = start
            d['end_percent'] = end
            n = [t[0], d]
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        return (vae.decode_tiled(samples["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, )
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // vae.downscale_ratio) * vae.downscale_ratio
        y = (pixels.shape[2] // vae.downscale_ratio) * vae.downscale_ratio
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % vae.downscale_ratio) // 2
            y_offset = (pixels.shape[2] % vae.downscale_ratio) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
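        # The dilation is done by convolving the rounded mask with a ones kernel and
        # clamping the result back to the 0..1 range.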
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )


class InpaintModelConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
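        # Encode the masked pixels as "concat_latent_image" for inpainting models while
        # keeping the encode of the untouched pixels as the latent to denoise, with the
        # resized mask attached as "noise_mask".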
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}

        out_latent["samples"] = orig_latent
        out_latent["noise_mask"] = mask

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()
                d["concat_latent_image"] = concat_latent
                d["concat_mask"] = mask
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1], out_latent)


class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return { "ui": { "latents": results } }


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True


class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out[:3]

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
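        # Re-use the LoRA cached from the previous execution when the same file is
        # requested again; otherwise drop the old cache entry before loading from disk.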
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)

class LoraLoaderModelOnly(LoraLoader):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)

class VAELoader:
    @staticmethod
    def vae_list():
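        # The "taesd"/"taesdxl" entries are only offered when both the matching encoder
        # and decoder files are present in the vae_approx folder.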
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        return vaes

    @staticmethod
    def load_taesd(name):
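        # Merge the separate encoder/decoder state dicts under "taesd_encoder."/"taesd_decoder."
        # key prefixes and record the latent scale factor for the given model family.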
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
        return sd

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        if vae_name in ["taesd", "taesdxl"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )


class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
        cnets = {}
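        # Cache ControlNet copies keyed by the previous controlnet so the positive and
        # negative conditioning share a single instance instead of duplicating it.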

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent))
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])


class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                              "type": (["stable_diffusion", "stable_cascade"], ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name, type="stable_diffusion"):
        clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
        if type == "stable_cascade":
            clip_type = comfy.sd.CLIPType.STABLE_CASCADE

        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled="unprojected")
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
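        # Latents are 4-channel tensors at 1/8 of the requested pixel resolution.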
        latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
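        # Clamp batch_index and length to the available batch, and slice (or repeat)
        # any noise mask so it stays aligned with the selected latents.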
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = samples
        else:
            s = samples.copy()

            if width == 0:
                height = max(64, height)
                width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            elif height == 0:
                width = max(64, width)
                height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            else:
                width = max(64, width)
                height = max(64, height)

            s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
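            # Build a linear feather ramp along each pasted edge that does not touch the
            # border of the destination latent.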
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

    def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"):

        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]

        if samples1.shape != samples2.shape:
            # Latent tensors are already in NCHW layout, so they can be rescaled directly to match samples1.
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')

        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        # Reshape the mask to (batch, 1, H, W) and attach it so samplers only re-noise the masked region.
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
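    # Shared sampling helper for the KSampler nodes: prepares the initial noise (or zeros
    # when noise is disabled), forwards any "noise_mask" from the latent dict, wires up the
    # preview callback, and runs comfy.sample.sample for the requested step range.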
    latent_image = latent["samples"]
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
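        # Resolve the output folder plus an incrementing counter for this prefix, then write
        # every image in the batch as a PNG.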
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for (batch_number, image) in enumerate(images):
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
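            # Unless metadata embedding is disabled (args.disable_metadata), store the prompt
            # and any extra workflow info as PNG text chunks.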
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
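    # Behaves like SaveImage, but writes to the temp directory with a randomized prefix and
    # light compression, so the files only serve as in-UI previews.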
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 1

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True})},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
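        # Load the image, iterate over every frame (for animated formats), convert each frame
        # to a float tensor in [0, 1], and build an inverted-alpha mask when an alpha channel
        # is present.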
        image_path = folder_paths.get_annotated_filepath(image)
        img = Image.open(image_path)
        output_images = []
        output_masks = []
        for i in ImageSequence.Iterator(img):
            i = ImageOps.exif_transpose(i)
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            if 'A' in i.getbands():
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
            output_images.append(image)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return (output_image, output_mask)

    @classmethod
    def IS_CHANGED(s, image):
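        # Hash the file contents so the node re-executes whenever the image on disk changes.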
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True}),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
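        # Pick the requested channel as a single-channel float mask in [0, 1]; the alpha
        # channel is inverted so transparent areas end up masked.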
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask.unsqueeze(0),)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
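        # A width or height of 0 means "derive from the aspect ratio": the missing dimension
        # is computed from the other one, and if both are 0 the image passes through unchanged.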
        if width == 0 and height == 0:
            s = image
        else:
            samples = image.movedim(-1,1)

            if width == 0:
                width = max(1, round(samples.shape[3] * height / samples.shape[2]))
            elif height == 0:
                height = max(1, round(samples.shape[2] * width / samples.shape[3]))

            s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
            s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)

class ImageBatch:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "batch"

    CATEGORY = "image"

    def batch(self, image1, image2):
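        # If the two batches differ in spatial size, rescale image2 to match image1 before
        # concatenating along the batch dimension.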
        if image1.shape[1:] != image2.shape[1:]:
            image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
        s = torch.cat((image1, image2), dim=0)
        return (s,)

class EmptyImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                              "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "generate"

    CATEGORY = "image"

    def generate(self, width, height, batch_size=1, color=0):
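        # `color` is a packed 0xRRGGBB integer; unpack each channel with bit shifts and
        # normalize to [0, 1] before broadcasting across the batch.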
        r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
        g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
        b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
        return (torch.cat((r, g, b), dim=-1), )

class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
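        # Build a larger canvas filled with 0.5 (mid grey), paste the original image into it,
        # and return a mask that is 1.0 over the padded border and feathers toward 0 inside
        # the original image area.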
        d1, d2, d3, d4 = image.size()

        new_image = torch.ones(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        ) * 0.5

        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
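            # Quadratic feathering: for each pixel of the original image, take its distance d
            # to the nearest padded edge and set the mask to ((feathering - d) / feathering) ** 2
            # when d < feathering, 0 otherwise.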

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImageBatch": ImageBatch,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "EmptyImage": EmptyImage,
    "ConditioningAverage": ConditioningAverage ,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
    "ConditioningSetAreaStrength": ConditioningSetAreaStrength,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentBlend": LatentBlend,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,
    "InpaintModelConditioning": InpaintModelConditioning,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
    "LoraLoaderModelOnly": LoraLoaderModelOnly,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentBlend": "Latent Blend",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

EXTENSION_WEB_DIRS = {}

def load_custom_node(module_path, ignore=set()):
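    # Import a single .py file or a package directory as a custom node module, register any
    # WEB_DIRECTORY it exposes, and merge its NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS
    # into the global tables (skipping names listed in `ignore`).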
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            module_dir = os.path.split(module_path)[0]
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module_dir = module_path

        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)

        if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
            web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
            if os.path.isdir(web_dir):
                EXTENSION_WEB_DIRS[module_name] = web_dir

        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            for name in module.NODE_CLASS_MAPPINGS:
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

def load_custom_nodes():
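    # Walk every configured "custom_nodes" directory, import each module or package found there
    # (skipping .disabled entries and __pycache__), and report per-module import times.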
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(os.path.realpath(custom_node_path))
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

def init_custom_nodes():
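    # Load the built-in extra node modules from comfy_extras first, then any user-installed
    # custom nodes.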
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    extras_files = [
        "nodes_latent.py",
        "nodes_hypernetwork.py",
        "nodes_upscale_model.py",
        "nodes_post_processing.py",
        "nodes_mask.py",
        "nodes_compositing.py",
        "nodes_rebatch.py",
        "nodes_model_merging.py",
        "nodes_tomesd.py",
        "nodes_clip_sdxl.py",
        "nodes_canny.py",
        "nodes_freelunch.py",
        "nodes_custom_sampler.py",
        "nodes_hypertile.py",
        "nodes_model_advanced.py",
        "nodes_model_downscale.py",
        "nodes_images.py",
        "nodes_video_model.py",
        "nodes_sag.py",
        "nodes_perpneg.py",
        "nodes_stable3d.py",
        "nodes_sdupscale.py",
        "nodes_photomaker.py",
        "nodes_cond.py",
        "nodes_morphology.py",
        "nodes_stable_cascade.py",
        "nodes_differential_diffusion.py",
    ]

    for node_file in extras_files:
        load_custom_node(os.path.join(extras_dir, node_file))

    load_custom_nodes()