import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random
import logging

from PIL import Image, ImageOps, ImageSequence, ImageFile
from PIL.PngImagePlugin import PngInfo

import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))

import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet

import comfy.clip_vision

import comfy.model_management
from comfy.cli_args import args

import importlib

import folder_paths
import latent_preview
import node_helpers

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=16384

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )
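
# Note: a CONDITIONING value is a list of [cond_tensor, extras_dict] pairs, as returned by
# CLIPTextEncode above; the conditioning nodes below copy the extras dict before modifying it
# so upstream values are left intact.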

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
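            # Trim or zero-pad cond_from so it matches the token length of the target cond before blending.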
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )

class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
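        # The area is stored in latent coordinates (pixel values // 8), ordered (height, width, y, x).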
        c = node_helpers.conditioning_set_values(conditioning, {"area": (height // 8, width // 8, y // 8, x // 8),
                                                                "strength": strength,
                                                                "set_area_to_bounds": False})
        return (c, )

class ConditioningSetAreaPercentage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"area": ("percentage", height, width, y, x),
                                                                "strength": strength,
                                                                "set_area_to_bounds": False})
        return (c, )

class ConditioningSetAreaStrength:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, strength):
        c = node_helpers.conditioning_set_values(conditioning, {"strength": strength})
        return (c, )


class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)

        c = node_helpers.conditioning_set_values(conditioning, {"mask": mask,
                                                                "set_area_to_bounds": set_area_to_bounds,
                                                                "mask_strength": strength})
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            pooled_output = d.get("pooled_output", None)
            if pooled_output is not None:
                d["pooled_output"] = torch.zeros_like(pooled_output)
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = node_helpers.conditioning_set_values(conditioning, {"start_percent": start,
                                                                "end_percent": end})
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        return (vae.decode_tiled(samples["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, )
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // vae.downscale_ratio) * vae.downscale_ratio
        y = (pixels.shape[2] // vae.downscale_ratio) * vae.downscale_ratio
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % vae.downscale_ratio) // 2
            y_offset = (pixels.shape[2] % vae.downscale_ratio) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
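        # "Growing" the mask is a dilation: convolve the binary mask with a ones kernel and clamp to [0, 1].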
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )


class InpaintModelConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}

        out_latent["samples"] = orig_latent
        out_latent["noise_mask"] = mask

        out = []
        for conditioning in [positive, negative]:
            c = node_helpers.conditioning_set_values(conditioning, {"concat_latent_image": concat_latent,
                                                                    "concat_mask": mask})
            out.append(c)
        return (out[0], out[1], out_latent)


class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return { "ui": { "latents": results } }


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True


class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
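        # load_checkpoint_guess_config can return extra outputs; only MODEL, CLIP and VAE are exposed here.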
        return out[:3]

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
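        # Reuse the cached LoRA when the path is unchanged; otherwise drop the old copy before loading.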
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)

class LoraLoaderModelOnly(LoraLoader):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)

class VAELoader:
    @staticmethod
    def vae_list():
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False
        sd3_taesd_enc = False
        sd3_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
            elif v.startswith("taesd3_decoder."):
                sd3_taesd_dec = True
            elif v.startswith("taesd3_encoder."):
                sd3_taesd_enc = True
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        if sd3_taesd_dec and sd3_taesd_enc:
            vaes.append("taesd3")
        return vaes

    @staticmethod
    def load_taesd(name):
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
            sd["vae_shift"] = torch.tensor(0.0)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
            sd["vae_shift"] = torch.tensor(0.0)
        elif name == "taesd3":
            sd["vae_scale"] = torch.tensor(1.5305)
            sd["vae_shift"] = torch.tensor(0.0609)
        return sd

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
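        # The TAESD variants are assembled from separate encoder/decoder files under vae_approx (see load_taesd above).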
        if vae_name in ["taesd", "taesdxl", "taesd3"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )


class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
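        # Cache ControlNet copies keyed by the previous controlnet in the chain so the positive and negative conds share one instance.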
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent), vae)
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])


class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio"], ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name, type="stable_diffusion"):
        if type == "stable_cascade":
            clip_type = comfy.sd.CLIPType.STABLE_CASCADE
        elif type == "sd3":
            clip_type = comfy.sd.CLIPType.SD3
        elif type == "stable_audio":
            clip_type = comfy.sd.CLIPType.STABLE_AUDIO
        else:
            clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION

        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ),
                              "clip_name2": (folder_paths.get_filename_list("clip"), ),
                              "type": (["sdxl", "sd3"], ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2, type):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        if type == "sdxl":
            clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
        elif type == "sd3":
            clip_type = comfy.sd.CLIPType.SD3

        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled="unprojected")
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
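        # Stable-Diffusion-style latents: 4 channels at 1/8 of the requested pixel resolution.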
        latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
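        # Clamp the requested window to the batch, then carry noise_mask and batch_index metadata along with the slice.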
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = samples
        else:
            s = samples.copy()

            if width == 0:
                height = max(64, height)
                width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            elif height == 0:
                width = max(64, width)
                height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            else:
                width = max(64, width)
                height = max(64, height)

            s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
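        # Paste samples_from into samples_to at (x, y); with feather > 0 the overlapping edges are blended with a linear ramp.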
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

    def blend(self, samples1, samples2, blend_factor: float, blend_mode: str = "normal"):

        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]

        if samples1.shape != samples2.shape:
            # Latent tensors are already NCHW, so samples2 can be rescaled directly to match samples1.
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')

        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
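        # blend_factor weights the first latent: 1.0 keeps samples1 unchanged, 0.0 returns the
        # result of blend_mode (plain samples2 for the "normal" mode).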
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
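        # The mask is stored as (batch, 1, height, width) alongside the latent; samplers use it to
        # confine denoising to the masked region.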
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
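    # Shared sampling helper: prepares the initial noise (or zeros when disable_noise is set),
    # picks up an optional "noise_mask" from the latent dict, wires up the preview callback and
    # progress bar, then delegates to comfy.sample.sample(). Returns a copy of the input latent
    # dict with "samples" replaced by the sampled result.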
    latent_image = latent["samples"]
    latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image)

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
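        # "return_with_leftover_noise" == "enable" maps to force_full_denoise=False so a later pass
        # can continue from end_at_step; "add_noise" == "disable" maps to disable_noise=True and
        # starts sampling from the latent as-is.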
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for (batch_number, image) in enumerate(images):
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
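            # Unless metadata is disabled on the command line (args.disable_metadata), the workflow
            # prompt and any extra PNG info are embedded as text chunks in the saved file.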
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
m957ymj75urz's avatar
m957ymj75urz committed
1449
1450
1451
1452
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
        self.compress_level = 1

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True})},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        
        img = node_helpers.pillow(Image.open, image_path)
        
        output_images = []
        output_masks = []
        w, h = None, None

        excluded_formats = ['MPO']
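        # Iterate over every frame of the (possibly animated) file: normalise 32-bit integer frames,
        # convert to RGB, and derive a mask from the inverted alpha channel when one is present.
        # Frames whose size differs from the first frame are skipped; multi-frame MPO files are
        # reduced to their first frame after the loop via excluded_formats.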
        
        for i in ImageSequence.Iterator(img):
            i = node_helpers.pillow(ImageOps.exif_transpose, i)

            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            image = i.convert("RGB")

            if len(output_images) == 0:
                w = image.size[0]
                h = image.size[1]
            
            if image.size[0] != w or image.size[1] != h:
                continue
            
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            if 'A' in i.getbands():
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
            output_images.append(image)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1 and img.format not in excluded_formats:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return (output_image, output_mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True}),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = node_helpers.pillow(Image.open, image_path)
        i = node_helpers.pillow(ImageOps.exif_transpose, i)
        if i.getbands() != ("R", "G", "B", "A"):
            if i.mode == 'I':
                i = i.point(lambda i: i * (1 / 255))
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask.unsqueeze(0),)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
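        # width == 0 and height == 0 returns the image untouched; if only one of them is 0 it is
        # derived from the other so the original aspect ratio is preserved.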
        if width == 0 and height == 0:
            s = image
        else:
            samples = image.movedim(-1,1)

            if width == 0:
                width = max(1, round(samples.shape[3] * height / samples.shape[2]))
            elif height == 0:
                height = max(1, round(samples.shape[2] * width / samples.shape[3]))

            s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
            s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)

class ImageBatch:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "batch"

    CATEGORY = "image"

    def batch(self, image1, image2):
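        # If the two inputs differ in spatial size, image2 is bilinearly rescaled (center cropped)
        # to match image1 before the two batches are concatenated along dim 0.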
        if image1.shape[1:] != image2.shape[1:]:
            image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
        s = torch.cat((image1, image2), dim=0)
        return (s,)

class EmptyImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                              "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "generate"

    CATEGORY = "image"

    def generate(self, width, height, batch_size=1, color=0):
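        # "color" is a packed 0xRRGGBB integer; each channel is extracted and normalised to [0, 1]
        # before being filled across the whole batch.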
        r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
        g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
        b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
        return (torch.cat((r, g, b), dim=-1), )

class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
Guo Y.K's avatar
Guo Y.K committed
1718
1719
        d1, d2, d3, d4 = image.size()

        new_image = torch.ones(
Guo Y.K's avatar
Guo Y.K committed
1721
1722
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        ) * 0.5

Guo Y.K's avatar
Guo Y.K committed
1725
1726
1727
1728
1729
1730
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
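            # For every pixel of the original image, take its distance d to the nearest padded edge
            # and set the mask to ((feathering - d) / feathering) ** 2, a quadratic ramp from 0 deep
            # inside the image up to ~1 next to the new border; edges with no padding are ignored.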

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

Guo Y.K's avatar
Guo Y.K committed
1758
1759
1760
        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
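    # Maps the node type name used in workflow JSON to the Python class implementing it.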
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImageBatch": ImageBatch,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "EmptyImage": EmptyImage,
    "ConditioningAverage": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
    "ConditioningSetAreaStrength": ConditioningSetAreaStrength,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentBlend": LatentBlend,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,
    "InpaintModelConditioning": InpaintModelConditioning,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
    "LoraLoaderModelOnly": LoraLoaderModelOnly,
}

NODE_DISPLAY_NAME_MAPPINGS = {
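    # Human-readable names shown in the UI; keys must match the entries in NODE_CLASS_MAPPINGS.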
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentBlend": "Latent Blend",
    "LatentFromBatch": "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

EXTENSION_WEB_DIRS = {}


def get_relative_module_name(module_path: str) -> str:
    """
    Returns the dotted module name, relative to the ComfyUI base path, for the given module path.
    Examples:
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node.py") -> "custom_nodes.my_custom_node"
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node") -> "custom_nodes.my_custom_node"
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/") -> "custom_nodes.my_custom_node"
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/__init__.py") -> "custom_nodes.my_custom_node.__init__"
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/__init__") -> "custom_nodes.my_custom_node.__init__"
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/__init__/") -> "custom_nodes.my_custom_node.__init__"
        get_relative_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node.disabled") -> "custom_nodes.my_custom_node"
    Args:
        module_path (str): The path of the module.
    Returns:
        str: The module name.
    """
    relative_path = os.path.relpath(module_path, folder_paths.base_path)
    if os.path.isfile(module_path):
        relative_path = os.path.splitext(relative_path)[0]
    return relative_path.replace(os.sep, '.')


def load_custom_node(module_path: str, ignore=set()) -> bool:
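    # Imports a single custom node module (either a .py file or a package directory containing
    # __init__.py), registers its NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS, and records an
    # optional WEB_DIRECTORY for serving frontend assets. Names listed in `ignore` (the built-in
    # node names) are never overridden.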
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        logging.debug("Trying to load custom node {}".format(module_path))
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            module_dir = os.path.split(module_path)[0]
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module_dir = module_path

        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)

        if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
            web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
            if os.path.isdir(web_dir):
                EXTENSION_WEB_DIRS[module_name] = web_dir

        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            for name, node_cls in module.NODE_CLASS_MAPPINGS.items():
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = node_cls
                    node_cls.RELATIVE_PYTHON_MODULE = get_relative_module_name(module_path)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        logging.warning(traceback.format_exc())
        logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
        return False

def init_external_custom_nodes():
    """
    Initializes the external custom nodes.

    This function loads custom nodes from the specified folder paths and imports them into the application.
    It measures the import times for each custom node and logs the results.

    Returns:
        None
    """
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(os.path.realpath(custom_node_path))
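        # Each entry may be a lone .py file or a package directory; __pycache__, non-Python files
        # and anything ending in ".disabled" are skipped, and per-module import time is recorded
        # for the summary log below.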
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        logging.info("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
        logging.info("")

def init_builtin_extra_nodes():
    """
    Initializes the built-in extra nodes in ComfyUI.

    This function loads the extra node files located in the "comfy_extras" directory and imports them into ComfyUI.
    If any of the extra node files fail to import, a warning message is logged.

    Returns:
        None
    """
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    extras_files = [
        "nodes_latent.py",
        "nodes_hypernetwork.py",
        "nodes_upscale_model.py",
        "nodes_post_processing.py",
        "nodes_mask.py",
        "nodes_compositing.py",
        "nodes_rebatch.py",
        "nodes_model_merging.py",
        "nodes_tomesd.py",
        "nodes_clip_sdxl.py",
        "nodes_canny.py",
        "nodes_freelunch.py",
        "nodes_custom_sampler.py",
        "nodes_hypertile.py",
        "nodes_model_advanced.py",
        "nodes_model_downscale.py",
        "nodes_images.py",
        "nodes_video_model.py",
        "nodes_sag.py",
        "nodes_perpneg.py",
        "nodes_stable3d.py",
        "nodes_sdupscale.py",
        "nodes_photomaker.py",
        "nodes_cond.py",
        "nodes_morphology.py",
        "nodes_stable_cascade.py",
        "nodes_differential_diffusion.py",
        "nodes_ip2p.py",
        "nodes_model_merging_model_specific.py",
        "nodes_pag.py",
        "nodes_align_your_steps.py",
        "nodes_attention_multiply.py",
        "nodes_advanced_samplers.py",
        "nodes_webcam.py",
        "nodes_audio.py",
        "nodes_sd3.py",
        "nodes_gits.py",
    ]

    import_failed = []
    for node_file in extras_files:
        if not load_custom_node(os.path.join(extras_dir, node_file)):
            import_failed.append(node_file)

    return import_failed


def init_extra_nodes(init_custom_nodes=True):
    import_failed = init_builtin_extra_nodes()

    if init_custom_nodes:
        init_external_custom_nodes()
    else:
        logging.info("Skipping loading of custom nodes")

    if len(import_failed) > 0:
        logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
        for node in import_failed:
            logging.warning("IMPORT FAILED: {}".format(node))
        logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
        if args.windows_standalone_build:
            logging.warning("Please run the update script: update/update_comfyui.bat")
        else:
            logging.warning("Please do a: pip install -r requirements.txt")
        logging.warning("")