import torch

import os
import sys
import json
import hashlib
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np


sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_convert
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

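# Upper bound for the width/height/x/y INT widgets declared by the nodes below.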
MAX_RESOLUTION=8192

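# The node classes below all follow the same convention: INPUT_TYPES declares the
# input sockets/widgets, RETURN_TYPES and FUNCTION name the outputs and the method
# to call, and CATEGORY groups the node in the UI menu.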
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
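        # A CONDITIONING value is a list of [cond_tensor, options_dict] pairs; later
        # nodes attach keys such as 'area', 'mask', 'control' or 'gligen' to the dict.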
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:,:,:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round()).squeeze(1)
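        # Set the masked pixels to mid-gray (0.5) before encoding so the original
        # content does not leak into the latents of the region that will be repainted.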
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths += next(os.walk(search_path))[1]
        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths = next(os.walk(search_path))[1]
                if model_path in paths:
                    model_path = os.path.join(search_path, model_path)
                    break

        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent"

    def rotate(self, samples, batch_index):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        s["samples"] = s_in[batch_index:batch_index + 1].clone()
        s["batch_index"] = batch_index
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
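        # x, y and feather arrive in pixels and were converted to latent units (//8)
        # above; when feather > 0 a linear ramp mask blends the pasted latent in.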
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)

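# Shared sampling helper used by KSampler and KSamplerAdvanced: prepares (or skips)
# the initial noise, picks up an optional "noise_mask" from the latent dict and
# delegates to comfy.sample.sample, returning a copy of the latent with new samples.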
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        skip = latent["batch_index"] if "batch_index" in latent else 0
        noise = comfy.sample.prepare_noise(latent_image, seed, skip)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

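# Single-pass sampler node; KSamplerAdvanced below exposes the same machinery with
# explicit start/end steps and optional leftover noise for multi-pass workflows.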
class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
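        # Output files are named <prefix>_<counter>_.png; map_filename parses existing
        # filenames so the counter keeps increasing across runs.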
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

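# Loads an image from the input directory; an alpha channel, if present, is returned
# inverted as the MASK output, and IS_CHANGED hashes the file so edits on disk are detected.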
class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), ),
                    "channel": (s._color_channels, ),}
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

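        # The padded border keeps mask value 1.0 (fully repaint); inside the original
        # image a quadratic falloff (v * v) feathers the transition over `feathering` pixels.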
        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


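# Registry mapping node type name -> implementing class; custom node packages
# extend it through load_custom_node() below.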
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentFromBatch": LatentFromBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,
}

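# Optional human-readable titles shown in the UI instead of the raw mapping keys.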
NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentComposite": "Latent Composite",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

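# Imports a custom node package (a single .py file or a directory containing an
# __init__.py) and merges any NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS it defines.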
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            load_custom_node(module_path)

def init_custom_nodes():
    load_custom_nodes()
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))