import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random

from PIL import Image, ImageOps
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths
import latent_preview

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192
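
# Note on the CONDITIONING convention used throughout this file: a
# CONDITIONING value is a list of [cond_tensor, options_dict] pairs. The
# tensor holds the token-level embeddings; the dict carries extras such as
# "pooled_output", "area", "mask" or "control". The nodes below copy the
# dict before mutating it so that upstream conditioning stays untouched.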

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )
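
# ConditioningAverage is a plain linear interpolation: with
# conditioning_to_strength = 0.3 each output tensor is
#   0.3 * cond_to + 0.7 * cond_from
# where cond_from is first truncated, or zero-padded, along the token axis
# to match cond_to.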

class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )
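
# The 'area' tuple is stored in latent coordinates (hence the // 8):
# e.g. width=512, height=256, x=64, y=0 becomes (32, 64, 0, 8), ordered as
# (height, width, y, x) in latent cells.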

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = []
        for t in conditioning:
            d = t[1].copy()
            d['start_percent'] = start
            d['end_percent'] = end
            n = [t[0], d]
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels
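
    # Example: a 513x516 input is cropped to 512x512; dimensions are rounded
    # down to a multiple of 8 and the leftover rows/columns are split between
    # both sides by the offsets above, keeping the crop centered.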

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
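
# The conv2d above uses an all-ones kernel with the result clamped to
# [0, 1], i.e. a morphological dilation: every latent cell within
# grow_mask_by pixels of the mask joins the noise_mask, which helps hide
# the inpainting seam.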

class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = {"prompt": prompt_info}
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"
        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return {}


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )
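
# Files without the "latent_format_version_0" marker predate SaveLatent
# above and appear to have been stored already multiplied by the SD scale
# factor, so 1/0.18215 undoes that scaling; files carrying the marker load
# as-is.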

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True


class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)
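
# LoraLoader caches the last file it loaded as a (path, state_dict) pair,
# so re-queueing a prompt with an unchanged lora_name skips the disk read;
# selecting a different file first drops the old reference, then loads the
# new one.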

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )


class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength)
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])
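
# The `cnets` dict deduplicates ControlNet copies: when the positive and
# negative conditioning chain through the same previous 'control' entry
# they are given one shared c_net instead of two independent copies.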


class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )
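
# SD latents use 4 channels at 1/8 of the pixel resolution, so a 512x512
# request allocates a [batch_size, 4, 64, 64] tensor of zeros.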


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)

class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]

        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = masks.repeat((amount, 1,1,1)) #repeat the padded masks, not the original tensor
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)
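
# "batch_index" tracks which position in the original batch each latent came
# from; common_ksampler (below) forwards it to comfy.sample.prepare_noise so
# a latent picked out of (or repeated from) a batch gets the same per-sample
# noise it would have had in the full batch.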

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)
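
# Example: a [1, 4, 64, 64] latent (a 512x512 image) with scale_by=1.5 is
# resized to [1, 4, 96, 96], i.e. 768x768 in pixel space.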

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
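
# With feather > 0 the paste is blended in over a linear ramp: `mask` rises
# from 1/feather at a pasted edge to 1 over `feather` latent cells (edges
# flush with the destination border are left unfaded), and `rev_mask` keeps
# the complementary share of the original samples.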

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    preview_format = "JPEG"
    if preview_format not in ["JPEG", "PNG"]:
        preview_format = "JPEG"

    previewer = latent_preview.get_previewer(device, model.model.latent_format)

    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        preview_bytes = None
        if previewer:
            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
        pbar.update_absolute(step + 1, total_steps, preview_bytes)

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )
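
# common_ksampler is the shared core of the sampler nodes below: it builds
# the initial noise on the CPU (seeded per batch index when "batch_index"
# is present), wires a per-step callback that pushes latent previews through
# the ProgressBar, and returns a copy of the input latent dict with only
# "samples" replaced.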

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
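
# A common two-stage pattern (a usage sketch, not enforced by the node):
# sample steps 0..15 of 20 here with return_with_leftover_noise="enable",
# then feed the still-noisy latent to a second KSamplerAdvanced with
# add_noise="disable" and start_at_step=15 to finish the remaining steps.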

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
comfyanonymous's avatar
comfyanonymous committed
1199
1200
1201
1202
1203
1204
1205
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

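# PreviewImage reuses the SaveImage machinery but targets the temp directory
# with a randomized filename prefix, so previews never collide with (or
# pollute) real outputs.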
class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5))

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

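# LoadImage lists every file in the input directory as a selectable input,
# returning the image as a float tensor in [0, 1] plus a mask derived from
# the alpha channel (inverted, so opaque pixels are 0 and transparent ones
# are 1). Images without an alpha channel get a 64x64 zero placeholder mask.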
class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

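# LoadImageMask loads a single channel of an image file as a mask. The image
# is normalized to RGBA first; the alpha channel is inverted to match
# LoadImage's convention, while red, green and blue are used as-is.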
class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), ),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

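# ImageScale resizes an image to an explicit width and height. Tensors flow
# through ComfyUI in NHWC layout, so the channel axis is moved to position 1
# for comfy.utils.common_upscale (which expects NCHW) and moved back after.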
class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


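# ImagePadForOutpaint grows the canvas by the requested margins and returns
# the padded image together with a mask that is 1.0 over the new regions
# (with a feathered falloff along the original edges) for a sampler to fill.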
class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
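            # Quadratic feather: for original pixels within `feathering` px of
            # a padded edge, d is the distance to the nearest such edge and the
            # mask value falls off as ((feathering - d) / feathering) ** 2,
            # from 1.0 right at the padding down to 0.0 at depth `feathering`.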

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


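# Maps the node type names stored in workflow JSON to their implementing
# classes; custom node packages extend this dict at import time (see
# load_custom_node below).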
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage ": ConditioningAverage ,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
}

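# Human-readable display names for the UI; node types without an entry fall
# back to their NODE_CLASS_MAPPINGS key.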
NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

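# Imports a custom node module (a single .py file or a package directory with
# an __init__.py) and merges its NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS
# into the global registries. Names listed in `ignore` (the built-in nodes)
# are never overridden. Returns True on success, False otherwise.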
def load_custom_node(module_path, ignore=set()):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            for name in module.NODE_CLASS_MAPPINGS:
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

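# Scans every configured custom_nodes directory and imports each module found
# there (skipping __pycache__ and anything ending in ".disabled"), timing each
# import so slow custom node packs are easy to spot in the startup log.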
def load_custom_nodes():
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

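# Loads the bundled comfy_extras node modules first, then any user-installed
# custom nodes; because load_custom_nodes() snapshots the registry as its
# ignore set, custom packs cannot override the built-in node names.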
def init_custom_nodes():
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    load_custom_node(os.path.join(extras_dir, "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_upscale_model.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_post_processing.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_mask.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_rebatch.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_model_merging.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_tomesd.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_clip_sdxl.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_canny.py"))
    load_custom_nodes()