import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib.util

supported_ckpt_extensions = ['.ckpt', '.pth']
supported_pt_extensions = ['.ckpt', '.pt', '.bin', '.pth']
try:
    import safetensors.torch
    supported_ckpt_extensions += ['.safetensors']
    supported_pt_extensions += ['.safetensors']
except ImportError:
    print("Could not import safetensors, safetensors support disabled.")

def recursive_search(directory):  
    result = []
    for root, subdir, file in os.walk(directory, followlinks=True):
        for filepath in file:
            #we os.path.join directory with a blank string to generate a path separator at the end.
            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),'')) 
    return result

def filter_files_extensions(files, extensions):
    return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))


def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
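        # CONDITIONING is a list of [cond_tensor, options_dict] pairs; downstream
        # nodes (ConditioningSetArea, ControlNetApply, StyleModelApply, ...) add
        # extra keys to the options dict.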
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
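        # width/height/x/y are given in pixels and stored in latent coordinates (1/8 scale)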
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
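        # crop the pixel dimensions down to the nearest multiple of 64 before encoding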
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
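        # set the pixels under the (rounded) mask to 0.5 (neutral gray) before encoding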
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    config_dir = os.path.join(models_dir, "configs")
    ckpt_dir = os.path.join(models_dir, "checkpoints")
    embedding_directory = os.path.join(models_dir, "embeddings")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (filter_files_extensions(recursive_search(s.config_dir), '.yaml'), ),
                              "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = os.path.join(self.config_dir, config_name)
        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=self.embedding_directory)

class CheckpointLoaderSimple:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    ckpt_dir = os.path.join(models_dir, "checkpoints")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
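        # unlike CheckpointLoader, the model config is guessed from the checkpoint
        # itself rather than supplied as a separate .yaml file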
        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
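        # operate on a clone so the CLIP object passed in is not modified in place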
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    lora_dir = os.path.join(models_dir, "loras")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (filter_files_extensions(recursive_search(s.lora_dir), supported_pt_extensions), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = os.path.join(self.lora_dir, lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    vae_dir = os.path.join(models_dir, "vae")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (filter_files_extensions(recursive_search(s.vae_dir), supported_pt_extensions), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = os.path.join(self.vae_dir, vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
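            # chain control nets: remember any control net already attached to this conditioning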
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class T2IAdapterLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    t2i_adapter_dir = os.path.join(models_dir, "t2i_adapter")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "t2i_adapter_name": (filter_files_extensions(recursive_search(s.t2i_adapter_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_t2i_adapter"

    CATEGORY = "loaders"

    def load_t2i_adapter(self, t2i_adapter_name):
        t2i_path = os.path.join(self.t2i_adapter_dir, t2i_adapter_name)
        t2i_adapter = comfy.sd.load_t2i_adapter(t2i_path)
        return (t2i_adapter,)

class CLIPLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    clip_dir = os.path.join(models_dir, "clip")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = os.path.join(self.clip_dir, clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
        return (clip,)

class CLIPVisionLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    clip_dir = os.path.join(models_dir, "clip_vision")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = os.path.join(self.clip_dir, clip_name)
        clip_vision = comfy_extras.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/style_model"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    style_model_dir = os.path.join(models_dir, "style_models")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (filter_files_extensions(recursive_search(s.style_model_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = os.path.join(self.style_model_dir, style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
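        # latents have 4 channels at 1/8 of the requested pixel resolution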
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )



class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
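        # width/height are given in pixels and converted to latent size (//8) for the upscale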
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
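            # build a linear ramp over `feather` latent pixels on each edge of the
            # pasted region that does not touch the border of the destination latent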
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x =  x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
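    # shared sampling helper for KSampler/KSamplerAdvanced: builds the noise on the
    # CPU with a seeded generator, prepares any noise mask and control nets, moves
    # everything to the torch device and runs comfy.samplers.KSampler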
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
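        # map the enable/disable dropdown strings onto common_ksampler's boolean flags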
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
        self.url_suffix = ""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
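        # files are written as <filename_prefix>_NNNNN_.png; the counter continues from
        # the highest index already present in the output directory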
        def map_filename(filename):
            prefix_len = len(filename_prefix)
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)
        try:
            counter = max(filter(lambda a: a[1][:-1] == filename_prefix and a[1][-1] == "_", map(map_filename, os.listdir(self.output_dir))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.mkdir(self.output_dir)
            counter = 1

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        paths = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
            file = f"{filename_prefix}_{counter:05}_.png"
            img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True)
            paths.append(file + self.url_suffix)
            counter += 1
        return { "ui": { "images": paths } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
        self.url_suffix = "?type=temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        if not os.path.exists(s.input_dir):
            os.makedirs(s.input_dir)
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
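        # the alpha channel (if present) is returned inverted as the MASK output;
        # otherwise an all-zero 64x64 mask is returned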
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "T2IAdapterLoader": T2IAdapterLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
}

def load_custom_node(module_path):
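    # import a single custom node module (a .py file or a package directory) and
    # merge its NODE_CLASS_MAPPINGS into the global mapping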
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

load_custom_nodes()

load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))