import torch

import os
import sys
import json
import hashlib
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

from comfy.diffusers_convert import load_diffusers

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy.clip_vision

import model_management
import importlib

import folder_paths

def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

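# Note on the CONDITIONING format used throughout this file: each conditioning value is a
# list of [cond_tensor, options_dict] pairs, as produced by CLIPTextEncode above; the nodes
# below attach extra keys such as 'area', 'strength', 'control' or 'adm' to the dict.
# A minimal sketch of the structure, assuming `clip` is a loaded CLIP object (illustration
# only, the nodes are the supported way to build it):
#
#   cond = [[clip.encode("a photo of a cat"), {}]]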
class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
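        # The area is stored in latent units (pixel values divided by 8) in the order
        # (height, width, y, x); strength and the sigma range are attached alongside it so
        # the sampler can restrict this conditioning to that region.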
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
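        # The VAE works on latents at 1/8 the pixel resolution, so the input is cropped
        # down to a multiple of 64 pixels before encoding; any alpha channel is dropped by
        # keeping only the first three channels below.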
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )

class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"model_path": (os.listdir(os.path.join(folder_paths.models_dir, 'diffusers'), ),),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path)
        return load_diffusers(model_path, fp16=True, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )


class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
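        # Latents are 4-channel and 1/8 the requested pixel resolution, hence the
        # height // 8 and width // 8 below.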
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
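            # Build a feather mask that ramps linearly from 0 to 1 over `feather` latent
            # rows/columns on each edge that meets existing content, so the pasted latent
            # blends into the destination instead of hard-cutting.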
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x =  x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


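# common_ksampler is the shared sampling path for the KSampler nodes below: it prepares the
# noise on the CPU from the given seed (or zeros when noise is disabled), optionally builds
# a denoise mask from latent["noise_mask"], loads the model and any attached ControlNets on
# the GPU, batches the conditioning to the noise batch size, runs comfy.samplers.KSampler
# and returns the resulting latent dict.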
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
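        # Output files are named "<prefix>_<counter>_.png" inside a subfolder taken from
        # filename_prefix; map_filename below parses existing files so the counter can
        # resume from the highest number already present.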
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
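        # Images are returned as float tensors in [0, 1] with shape [1, H, W, C]; the mask
        # comes from the alpha channel (inverted) when present, otherwise a small all-zero
        # placeholder is returned.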
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,
}
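# Custom node packages are merged into this mapping by load_custom_node() below: a module
# placed in custom_nodes/ only needs to expose its own NODE_CLASS_MAPPINGS dict. A minimal
# hypothetical example (the class and key names are made up for illustration):
#
#   class ExampleInvertedMask:
#       @classmethod
#       def INPUT_TYPES(s):
#           return {"required": {"mask": ("MASK",)}}
#       RETURN_TYPES = ("MASK",)
#       FUNCTION = "invert"
#       CATEGORY = "example"
#
#       def invert(self, mask):
#           return (1.0 - mask,)
#
#   NODE_CLASS_MAPPINGS = {"ExampleInvertedMask": ExampleInvertedMask}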

def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

def init_custom_nodes():
    load_custom_nodes()
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))