import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import random

from PIL import Image, ImageOps
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet

import comfy.clip_vision

import comfy.model_management
from comfy.cli_args import args

import importlib

import folder_paths
import latent_preview

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

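# Note: the node classes below all follow the same convention: INPUT_TYPES()
# declares the input sockets and widget ranges, RETURN_TYPES/FUNCTION name the
# output types and the entry-point method, CATEGORY places the node in the UI
# menu, and optional attributes (OUTPUT_NODE, IS_CHANGED, VALIDATE_INPUTS)
# control output behavior, caching and validation.
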
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )
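
# A CONDITIONING value, as built by encode() above, is a list of
# [cond_tensor, options_dict] pairs; the options dict starts with
# "pooled_output" and the nodes below layer on extra keys such as
# "area", "mask", "control" and "gligen".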

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]
        pooled_output_from = conditioning_from[0][1].get("pooled_output", None)

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            t_to = conditioning_to[i][1].copy()
            if pooled_output_from is not None and pooled_output_to is not None:
                t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
            elif pooled_output_from is not None:
                t_to["pooled_output"] = pooled_output_from

            n = [tw, t_to]
            out.append(n)
        return (out, )

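# Note: addWeighted() is a plain linear interpolation,
# tw = t1 * strength + t0 * (1 - strength); when the two prompts tokenize to
# different lengths, the shorter cond_from tensor is zero-padded to match.
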
class ConditioningConcat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
            }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            tw = torch.cat((t1, cond_from),1)
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)

        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )
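
# The 'area' tuple is stored in latent-space units: height, width, y and x are
# divided by 8 because the VAE downscales images by a factor of 8 per side.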

class ConditioningSetAreaPercentage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                              "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = ("percentage", height, width, y, x)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class ConditioningSetTimestepRange:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        c = []
        for t in conditioning:
            d = t[1].copy()
            d['start_percent'] = start
            d['end_percent'] = end
            n = [t[0], d]
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        return (vae.decode_tiled(samples["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                            }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, )
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
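
# Note: the conv2d with an all-ones kernel above is effectively a mask
# dilation: every pixel within grow_mask_by of the rounded mask becomes
# nonzero, and clamp() flattens the result back into a 0..1 noise_mask.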

class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return { "ui": { "latents": results } }


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True
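
# Note: latent files written before the "latent_format_version_0" marker
# existed are assumed to be stored pre-scaled, so load() multiplies them by
# 1.0 / 0.18215 (the SD1 latent scale factor) to recover raw latent values.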

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out[:3]

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)
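
# Note: load_lora() keeps a one-entry cache, self.loaded_lora, holding the
# last (path, state_dict) pair so re-running a workflow with an unchanged
# lora_name skips the disk read; picking a different file evicts the old entry.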

class LoraLoaderModelOnly(LoraLoader):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)

class VAELoader:
    @staticmethod
    def vae_list():
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        return vaes

    @staticmethod
    def load_taesd(name):
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
        return sd
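
# Note: the "vae_scale" values stored above are the latent scale factors the
# TAESD weights correspond to: 0.18215 for SD1.x latents and 0.13025 for SDXL.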

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        if vae_name in ["taesd", "taesdxl"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            n[1]['control_apply_to_uncond'] = True
            c.append(n)
        return (c, )

class ControlNetApplyAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING","CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent):
        if strength == 0:
            return (positive, negative)

        control_hint = image.movedim(-1,1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get('control', None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent))
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d['control'] = c_net
                d['control_apply_to_uncond'] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return (out[0], out[1])
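
# Note: the cnets dict above is keyed by each cond's previous 'control' entry,
# so positive and negative conds that share a controlnet chain also share a
# single new ControlNet copy instead of each getting their own.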


class UNETLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "unet_name": (folder_paths.get_filename_list("unet"), ),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name):
        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_unet(unet_path)
        return (model,)

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)

class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]

        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            s = samples
        else:
            s = samples.copy()

            if width == 0:
                height = max(64, height)
                width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            elif height == 0:
                width = max(64, width)
                height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            else:
                width = max(64, width)
                height = max(64, height)

            s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
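
# Note: the feather mask ramps linearly from 1/feather up to 1 across each
# pasted border that is not flush with the destination edge, cross-fading the
# pasted latent into the destination rather than leaving a hard seam.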

class LatentBlend:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

    def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"):

        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]

        if samples1.shape != samples2.shape:
            samples2.permute(0, 3, 1, 2)
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')
            samples2.permute(0, 2, 3, 1)

        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")
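
# Note: Tensor.permute() returns a new tensor rather than modifying in place,
# so the two bare permute() calls in blend() are no-ops; since latents are
# already NCHW, common_upscale() still receives the layout it expects.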

comfyanonymous's avatar
comfyanonymous committed
1206
1207
1208
1209
class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
1210
1211
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
1212
1213
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
comfyanonymous's avatar
comfyanonymous committed
1214
1215
1216
1217
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

comfyanonymous's avatar
comfyanonymous committed
1218
    CATEGORY = "latent/transform"
comfyanonymous's avatar
comfyanonymous committed
1219
1220

    def crop(self, samples, width, height, x, y):
1221
1222
        s = samples.copy()
        samples = samples['samples']
comfyanonymous's avatar
comfyanonymous committed
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
        x =  x // 8
        y = y // 8

        #enfonce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
1236
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)
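    # Illustrative sketch: cropping a 512x512 latent (samples shaped
    # [B, 4, 64, 64]) with width=256, height=256, x=64, y=0 slices
    # samples[:, :, 0:32, 8:40], since one latent cell covers 8 image pixels.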

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
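        # Reshape to [B, 1, H, W] so the mask broadcasts across latent
        # channels during sampling; a single 2-D mask becomes a batch of one.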
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent.get("batch_index", None)
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )
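# Illustrative sketch (not executed; `model`, `positive` and `negative` would
# come from loader/encode nodes):
#   latent = {"samples": torch.zeros([1, 4, 64, 64])}
#   out, = common_ksampler(model, seed=0, steps=20, cfg=8.0,
#                          sampler_name="euler", scheduler="normal",
#                          positive=positive, negative=negative, latent=latent)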

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = return_with_leftover_noise != "enable"
        disable_noise = add_noise == "disable"
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
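    # Illustrative sketch of two-stage sampling (hypothetical values): sample
    # steps 0-15 and keep the leftover noise, then finish steps 15-20 in a
    # second pass that adds no fresh noise.
    #   first, = KSamplerAdvanced().sample(model, "enable", 0, 20, 8.0,
    #       "euler", "normal", positive, negative, latent, 0, 15, "enable")
    #   final, = KSamplerAdvanced().sample(model, "disable", 0, 20, 8.0,
    #       "euler", "normal", positive, negative, first, 15, 20, "disable")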

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for image in images:
            # images arrive as float tensors in [0, 1]; scale to [0, 255] and
            # clip before the 8-bit conversion PIL expects.
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5))

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True})},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            # ComfyUI masks use 1.0 = "to be replaced", while PNG alpha uses
            # 1.0 = opaque, so the alpha channel is inverted here.
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask.unsqueeze(0))

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True}),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                # Alpha is inverted to match the 1.0 = "masked" convention.
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask.unsqueeze(0),)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
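        # A width or height of 0 means "derive this side from the aspect
        # ratio"; if both are 0 the image passes through unchanged.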
        if width == 0 and height == 0:
            s = image
        else:
            samples = image.movedim(-1,1)

            if width == 0:
                width = max(1, round(samples.shape[3] * height / samples.shape[2]))
            elif height == 0:
                height = max(1, round(samples.shape[2] * width / samples.shape[3]))

            s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
            s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
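        # Images are stored NHWC; movedim swaps to NCHW for common_upscale,
        # then back again afterwards.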
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)

class ImageBatch:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "batch"

    CATEGORY = "image"

    def batch(self, image1, image2):
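        # If spatial sizes differ, image2 is bilinearly resized to match
        # image1 before the two batches are concatenated along dim 0.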
        if image1.shape[1:] != image2.shape[1:]:
            image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
        s = torch.cat((image1, image2), dim=0)
        return (s,)

class EmptyImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                              "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
                              }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "generate"

    CATEGORY = "image"

    def generate(self, width, height, batch_size=1, color=0):
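        # `color` packs RGB into one 0xRRGGBB int; shifting and masking pull
        # out each 8-bit channel, and /0xFF normalizes it to [0, 1].
        # e.g. color=0x3366CC gives r~0.20, g=0.40, b=0.80.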
        r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
        g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
        b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
        return (torch.cat((r, g, b), dim=-1), )

class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
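            # Quadratic feather: for each pixel within `feathering` px of a
            # padded edge, the mask ramps up as
            # ((feathering - d) / feathering) ** 2, where d is the distance
            # to the nearest padded edge.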

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImageBatch": ImageBatch,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "EmptyImage": EmptyImage,
    "ConditioningAverage": ConditioningAverage ,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentBlend": LatentBlend,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
    "LoraLoaderModelOnly": LoraLoaderModelOnly,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentBlend": "Latent Blend",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

EXTENSION_WEB_DIRS = {}
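# Populated by load_custom_node: maps a module name to the web directory it
# exposes via WEB_DIRECTORY so the frontend can serve those assets.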

def load_custom_node(module_path, ignore=set()):
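    # A custom-node "module" is either a single .py file or a package
    # directory with an __init__.py; it must define NODE_CLASS_MAPPINGS for
    # its nodes to be registered.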
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            module_dir = os.path.split(module_path)[0]
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module_dir = module_path

        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)

        if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
            web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
            if os.path.isdir(web_dir):
                EXTENSION_WEB_DIRS[module_name] = web_dir

        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            for name in module.NODE_CLASS_MAPPINGS:
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

def load_custom_nodes():
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
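            # Skip stray non-Python files; directories pass through and are
            # loaded via their __init__.py.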
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

def init_custom_nodes():
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    extras_files = [
        "nodes_latent.py",
        "nodes_hypernetwork.py",
        "nodes_upscale_model.py",
        "nodes_post_processing.py",
        "nodes_mask.py",
        "nodes_compositing.py",
        "nodes_rebatch.py",
        "nodes_model_merging.py",
        "nodes_tomesd.py",
        "nodes_clip_sdxl.py",
        "nodes_canny.py",
        "nodes_freelunch.py",
        "nodes_custom_sampler.py",
        "nodes_hypertile.py",
        "nodes_model_advanced.py",
        "nodes_model_downscale.py",
        "nodes_images.py",
        "nodes_video_model.py",
    ]

    for node_file in extras_files:
        load_custom_node(os.path.join(extras_dir, node_file))

    load_custom_nodes()