import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib.util

import folder_paths

def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
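        # a conditioning is a list of [cond_tensor, options_dict] pairs; plain text encodes carry no extra options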
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
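        # the area is stored in latent coordinates as (height, width, y, x), hence the integer division by 8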
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
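        # crop to multiples of 64 pixels so the latent stays compatible with the model's downsampling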
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
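        # convolving the rounded mask with an all-ones kernel and clamping dilates it, despite the "erosion" name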
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
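        # set masked pixels to 0.5 (neutral gray) so the VAE does not encode the content being replaced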
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
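            # chain onto any controlnet already attached to this conditioning so stacked ControlNetApply nodes compose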
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy_extras.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/style_model"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
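        # append the style model's tokens after the text tokens along the sequence dimension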
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
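            # ramp the mask linearly over the feather width, skipping edges that are flush with the destination border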
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

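    # noise is generated on the CPU with a seeded generator so the same seed gives the same starting noise on any device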
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
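    # repeat each conditioning tensor across the batch when it covers fewer samples than the noise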
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        def map_filename(filename):
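            # split an existing "prefix_00001_.png" style name into (counter, prefix) so max() can pick the highest counter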
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except ValueError:
                digits = 0
            return (digits, prefix)

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        if not os.path.exists(s.input_dir):
            os.makedirs(s.input_dir)
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
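        # the mask is the inverted alpha channel: fully transparent pixels become 1.0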
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
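        # images are NHWC; common_upscale works on NCHW, so move the channel dim first and restore it after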
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
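        # image is NHWC: d1=batch, d2=height, d3=width, d4=channels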
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
}

def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

load_custom_nodes()

load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))