import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib

supported_ckpt_extensions = ['.ckpt', '.pth']
supported_pt_extensions = ['.ckpt', '.pt', '.bin', '.pth']
try:
    import safetensors.torch
    supported_ckpt_extensions += ['.safetensors']
    supported_pt_extensions += ['.safetensors']
except ImportError:
    print("Could not import safetensors, safetensors support disabled.")

def recursive_search(directory):  
    result = []
    for root, subdir, file in os.walk(directory, followlinks=True):
        for filepath in file:
            #we os.path.join directory with a blank string to generate a path separator at the end.
            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),'')) 
    return result

def filter_files_extensions(files, extensions):
    return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))


def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
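        # Conditioning is a list of [cond_tensor, options] pairs; the empty dict is where nodes
        # like ConditioningSetArea and ControlNetApply later store their options.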
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
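        # The area is stored in latent units (pixels // 8) as (height, width, y, x); each
        # conditioning entry gets its own copy of the options dict.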
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
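        # The VAE produces latents at 1/8 of the pixel resolution, so crop the image down to a
        # multiple of 64 pixels before encoding.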
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
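        # Neutralize the masked pixels to mid gray (0.5) so the encoder does not bake the original
        # content into the inpainted region.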
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    config_dir = os.path.join(models_dir, "configs")
    ckpt_dir = os.path.join(models_dir, "checkpoints")
    embedding_directory = os.path.join(models_dir, "embeddings")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (filter_files_extensions(recursive_search(s.config_dir), '.yaml'), ),
                              "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = os.path.join(self.config_dir, config_name)
        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=self.embedding_directory)

class CheckpointLoaderSimple:
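    # Like CheckpointLoader, but the model config is guessed from the checkpoint itself so no
    # .yaml file has to be selected.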
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    ckpt_dir = os.path.join(models_dir, "checkpoints")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    lora_dir = os.path.join(models_dir, "loras")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (filter_files_extensions(recursive_search(s.lora_dir), supported_pt_extensions), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = os.path.join(self.lora_dir, lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    vae_dir = os.path.join(models_dir, "vae")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (filter_files_extensions(recursive_search(s.vae_dir), supported_pt_extensions), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = os.path.join(self.vae_dir, vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
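        # Attach a copy of the control net (with the image hint and strength) to every conditioning
        # entry, chaining onto any control net already present on that entry.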
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class T2IAdapterLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    t2i_adapter_dir = os.path.join(models_dir, "t2i_adapter")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "t2i_adapter_name": (filter_files_extensions(recursive_search(s.t2i_adapter_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_t2i_adapter"

    CATEGORY = "loaders"

    def load_t2i_adapter(self, t2i_adapter_name):
        t2i_path = os.path.join(self.t2i_adapter_dir, t2i_adapter_name)
        t2i_adapter = comfy.sd.load_t2i_adapter(t2i_path)
        return (t2i_adapter,)

class CLIPLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    clip_dir = os.path.join(models_dir, "clip")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = os.path.join(self.clip_dir, clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
        return (clip,)

class CLIPVisionLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    clip_dir = os.path.join(models_dir, "clip_vision")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = os.path.join(self.clip_dir, clip_name)
        clip_vision = comfy_extras.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/style_model"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    style_model_dir = os.path.join(models_dir, "style_models")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (filter_files_extensions(recursive_search(s.style_model_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = os.path.join(self.style_model_dir, style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
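        # SD latents have 4 channels at 1/8 of the requested pixel resolution.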
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
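            # Build a mask that ramps linearly up to 1 over `feather` latent pixels along every
            # edge that does not lie on the destination border, so the paste blends into the
            # existing latent.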
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure the size is always a multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

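    # Noise is generated on the CPU with a seeded generator, presumably so the same seed
    # reproduces across devices.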
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

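    # If the latent carries an inpainting mask, resize it to the latent resolution and broadcast
    # it over channels and batch.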
    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
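    # Broadcast each conditioning tensor to the batch size, move it to the sampling device, and
    # collect any attached control nets.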
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

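    # Make sure every control model referenced by the conditioning is loaded on the GPU before
    # sampling.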
    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
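    # KSampler variant that exposes noise control and start/end steps, mapped onto
    # common_ksampler's disable_noise, start_step, last_step and force_full_denoise.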
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
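        # Saved files are named <prefix>_<counter>_.png; map_filename parses existing names so
        # numbering continues after the highest counter already present.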
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)
        
        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder) 

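        # Refuse to write outside the output directory (e.g. when filename_prefix contains '..').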
        if os.path.commonpath((self.output_dir, os.path.realpath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return
        
        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, optimize=True)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1
        
        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        if not os.path.exists(s.input_dir):
            os.makedirs(s.input_dir)
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
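        # The alpha channel, if present, becomes the mask, inverted so opaque pixels are 0 and
        # transparent pixels are 1.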
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "T2IAdapterLoader": T2IAdapterLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
}

def load_custom_node(module_path):
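    # Import a custom node module (a single .py file or a package directory) and merge its
    # NODE_CLASS_MAPPINGS into the global registry.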
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

load_custom_nodes()

load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))