import math

import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib

import folder_paths

def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
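            # the area is stored in latent coordinates (pixels // 8), ordered (height, width, y, x)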
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
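        # crop so both spatial dimensions are multiples of 64 pixels before encoding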
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
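        # set masked pixels to neutral gray (0.5) so the VAE does not encode the content that will be repainted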
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy_extras.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/style_model"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )



class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
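            # build a linear feathering ramp along every edge that borders existing latent content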
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always a multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


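# Shared sampling helper used by KSampler and KSamplerAdvanced: it prepares the noise
# (or zeros when noise is disabled), an optional noise mask, and device copies of the
# conditioning, then runs the selected sampler over the latent.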
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

    if "noise_mask" in latent:
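        # resize the mask to the latent resolution and broadcast it over channels and batch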
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

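    # collect every ControlNet referenced by the conditioning so its models can be loaded onto the GPU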
    control_nets = []
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        def map_filename(filename):
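            # split an existing filename into (counter, prefix) so the next free counter can be computed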
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.realpath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        if not os.path.exists(s.input_dir):
            os.makedirs(s.input_dir)
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
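        # use the (inverted) alpha channel as the mask when present, otherwise an empty 64x64 mask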
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                "feathering": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
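            # quadratic falloff from the padded edges into the original image so the outpainted area blends in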

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


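# Registry of the built-in nodes; load_custom_node() below merges custom node packs into it.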
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
}

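# Import a custom node module (a single .py file or a package directory) and merge its
# NODE_CLASS_MAPPINGS into the global registry.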
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

load_custom_nodes()

load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))