import torch

import os
import sys
import json
import hashlib
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_convert
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

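# Every node class below follows the same pattern: INPUT_TYPES() declares the
# inputs/widgets, RETURN_TYPES the output socket types, FUNCTION the name of the
# method that is executed, and CATEGORY the placement in the UI menu. The nodes
# are registered in NODE_CLASS_MAPPINGS at the bottom of this file.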
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_from": ("CONDITIONING", ), "conditioning_to": ("CONDITIONING", ),
                              "conditioning_from_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_from, conditioning_to, conditioning_from_strength):
        out = []
        for i in range(min(len(conditioning_from),len(conditioning_to))):
            t0 = conditioning_from[i]
            t1 = conditioning_to[i]
            tw = torch.mul(t0[0],(1-conditioning_from_strength)) + torch.mul(t1[0],conditioning_from_strength)
            n = [tw, t0[1].copy()]
            out.append(n)
        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

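    # The area is stored in latent-space units: the VAE downscales images by a
    # factor of 8, so the pixel width/height/x/y are divided by 8 below.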
    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )

class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:,:,:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round()).squeeze(1)
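        # Push the masked pixels to neutral 0.5 gray so the VAE does not encode
        # the content that will be regenerated; the dilated mask is returned as
        # "noise_mask" so sampling is restricted to the inpainted region.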
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths += next(os.walk(search_path))[1]
        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths = next(os.walk(search_path))[1]
                if model_path in paths:
                    model_path = os.path.join(search_path, model_path)
                    break

        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

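# Token Merging (ToMe) speed-up patch: "ratio" controls how aggressively tokens
# are merged inside the model, trading some quality for faster sampling.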
class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
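            # Each conditioning entry gets its own copy of the ControlNet with the
            # hint image attached; an already applied ControlNet is chained through
            # set_previous_controlnet so several ControlNets can be stacked.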
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent"

    def rotate(self, samples, batch_index):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        s["samples"] = s_in[batch_index:batch_index + 1].clone()
        s["batch_index"] = batch_index
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
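        # Paste samples_from into samples_to at (x, y). With feathering, a ramp
        # mask is built along every edge that borders existing content so the
        # two latents blend instead of leaving a hard seam.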
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x =  x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)

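# Shared sampling helper used by the KSampler nodes: it prepares seeded noise
# for the latent (optionally skipping ahead by "batch_index"), picks up an
# optional "noise_mask", and delegates the actual work to comfy.sample.sample().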
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        skip = latent["batch_index"] if "batch_index" in latent else 0
        noise = comfy.sample.prepare_noise(latent_image, seed, skip)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

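    # Unlike KSampler, this node exposes the step window (start_at_step /
    # end_at_step), whether fresh noise is added, and whether leftover noise is
    # kept, so sampling can be split across multiple sampler nodes.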
    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
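        # map_filename splits an existing file name into (counter, prefix) so the
        # highest counter used for this prefix can be found and incremented.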
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
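        # If the image has an alpha channel it is returned inverted as the MASK
        # output (1.0 marks transparent pixels); otherwise an empty mask is used.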
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), ),
                    "channel": (s._color_channels, ),}
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

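        # Feathering ramps the mask down quadratically over "feathering" pixels
        # inside the original image so the outpainted border blends smoothly.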
        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentFromBatch": LatentFromBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage ": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentComposite": "Latent Composite",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

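# A custom node module (a .py file or a package inside a custom_nodes folder)
# only has to export NODE_CLASS_MAPPINGS and may also export
# NODE_DISPLAY_NAME_MAPPINGS. A minimal sketch of such a module, using a
# hypothetical ImageBrightness node:
#
#     class ImageBrightness:
#         @classmethod
#         def INPUT_TYPES(s):
#             return {"required": {"image": ("IMAGE",),
#                                  "factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})}}
#         RETURN_TYPES = ("IMAGE",)
#         FUNCTION = "adjust"
#         CATEGORY = "image"
#
#         def adjust(self, image, factor):
#             return ((image * factor).clamp(0.0, 1.0),)
#
#     NODE_CLASS_MAPPINGS = {"ImageBrightness": ImageBrightness}
#     NODE_DISPLAY_NAME_MAPPINGS = {"ImageBrightness": "Image Brightness"}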
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            load_custom_node(module_path)

def init_custom_nodes():
    load_custom_nodes()
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))