import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time
import struct
from io import BytesIO

from PIL import Image, ImageOps
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch


sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
from comfy.cli_args import args
from comfy.taesd.taesd import TAESD

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths


class LatentPreviewer:
    def decode_latent_to_preview(self, device, x0):
        pass


def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192
MAX_PREVIEW_RESOLUTION = 512

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
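        # Blend each conditioning_to entry with the first conditioning_from tensor; the "from" tensor is truncated or zero-padded to match the token length before the weighted sum.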
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)
        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
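        # Attach the area (stored in latent units, i.e. pixel coordinates divided by 8) and strength to every conditioning entry.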
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
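        # Center-crop the image so height and width are multiples of 8, matching the VAE's 8x downscale factor.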
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
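        # Resize the mask to the pixel resolution and center-crop both image and mask to a multiple of 8 before masking and encoding.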
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

class TAESDPreviewerImpl(LatentPreviewer):
    def __init__(self, taesd):
        self.taesd = taesd

    def decode_latent_to_preview(self, device, x0):
        x_sample = self.taesd.decoder(x0.to(device))[0].detach()
        # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5)  # returns value in [-2, 2]
        x_sample = x_sample.sub(0.5).mul(2)
        return x_sample

class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # save metadata to support latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = {"prompt": prompt_info}
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"
        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]

        safetensors.torch.save_file(output, file, metadata=metadata)

        return {}


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        samples = {"samples": latent["latent_tensor"].float()}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True


class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
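        # Encode the text with CLIP and attach it as a GLIGEN position entry; the box is stored in latent units (pixel coordinates divided by 8).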
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
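        # Slice `length` latents starting at `batch_index`, carrying over any matching noise mask and batch indices.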
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
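        # Repeat the latent batch `amount` times; the noise mask is repeated to match and batch indices are offset so each copy gets distinct noise.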
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
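        # Paste samples_from into samples_to at (x, y) in latent units, optionally feathering the seam with a linear ramp mask.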
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)


def decode_latent_to_preview_image(previewer, device, preview_format, x0):
    x_sample = previewer.decode_latent_to_preview(device, x0)
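    # Convert the preview tensor from [-1, 1] to a uint8 PIL image, cap it at MAX_PREVIEW_RESOLUTION, and serialize it behind a 4-byte type header (1 = JPEG, 2 = PNG).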

    x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
    x_sample = x_sample.astype(np.uint8)

    preview_image = Image.fromarray(x_sample)

    if preview_image.size[0] > MAX_PREVIEW_RESOLUTION or preview_image.size[1] > MAX_PREVIEW_RESOLUTION:
        preview_image.thumbnail((MAX_PREVIEW_RESOLUTION, MAX_PREVIEW_RESOLUTION), Image.ANTIALIAS)

    preview_type = 1
    if preview_format == "JPEG":
        preview_type = 1
    elif preview_format == "PNG":
        preview_type = 2

    bytesIO = BytesIO()
    header = struct.pack(">I", preview_type)
    bytesIO.write(header)
    preview_image.save(bytesIO, format=preview_format)
    preview_bytes = bytesIO.getvalue()

    return preview_bytes


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
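    # Shared sampling helper: prepares the noise (or zeros when noise is disabled), sets up an optional TAESD previewer, and runs comfy.sample.sample with a per-step progress callback.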
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    preview_format = "JPEG"
    if preview_format not in ["JPEG", "PNG"]:
        preview_format = "JPEG"

    previewer = None
    if not args.disable_previews:
        # TODO previewer methods
        encoder_path = folder_paths.get_full_path("taesd", "taesd_encoder.pth")
        decoder_path = folder_paths.get_full_path("taesd", "taesd_decoder.pth")
        if encoder_path and decoder_path:
            taesd = TAESD(encoder_path, decoder_path).to(device)
            previewer = TAESDPreviewerImpl(taesd)
        else:
            print("Warning: TAESD previews enabled, but could not find models/taesd/taesd_encoder.pth and models/taesd/taesd_decoder.pth")

    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
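        # Per-step callback: decode a preview image when a previewer is available and push it to the progress bar.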
        preview_bytes = None
        if previewer:
            preview_bytes = decode_latent_to_preview_image(previewer, device, preview_format, x0)
        pbar.update_absolute(step + 1, total_steps, preview_bytes)

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
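        # Save each image as a PNG with the prompt and any extra metadata embedded, and report the saved files back to the UI.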
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
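        # Load the image, apply EXIF orientation, normalize to float32 in [0, 1], and derive an inverted mask from the alpha channel when present.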
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
1185
        input_dir = folder_paths.get_input_directory()
comfyanonymous's avatar
comfyanonymous committed
1186
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
1187
        return {"required":
comfyanonymous's avatar
comfyanonymous committed
1188
                    {"image": (sorted(files), ),
1189
                     "channel": (s._color_channels, ), }
1190
1191
                }

1192
    CATEGORY = "mask"
1193
1194
1195
1196

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
1197
        image_path = folder_paths.get_annotated_filepath(image)
1198
        i = Image.open(image_path)
1199
        i = ImageOps.exif_transpose(i)
1200
1201
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
1215
        image_path = folder_paths.get_annotated_filepath(image)
1216
1217
1218
1219
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()
pythongosssss's avatar
pythongosssss committed
1220

1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

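# ImageScale resizes via comfy.utils.common_upscale, moving channels to NCHW for the resize and back to NHWC afterwards.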
class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

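# ImageInvert works on float images in [0, 1], so inversion is simply 1.0 - image.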
class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


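# ImagePadForOutpaint grows the canvas by the given margins and returns a mask that is 1 over the new border and feathers down to 0 inside the original image.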
class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        # Image tensors are NHWC: batch, height, width, channels.
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            # Quadratic feathering: within `feathering` pixels of a padded edge,
            # ramp the mask from 0 (deep inside the original image) up towards 1.
            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


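# Maps node type names to the classes that implement them.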
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage ": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent
}

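# Human-readable display names for the node types registered above.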
NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentFromBatch": "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

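# Imports a single .py file or a package directory and merges its NODE_CLASS_MAPPINGS (and optional NODE_DISPLAY_NAME_MAPPINGS) into the global registries.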
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False

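# Scans every configured custom_nodes folder, importing each entry (skipping __pycache__ and *.disabled) and recording per-module import times.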
def load_custom_nodes():
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

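# Loads the bundled comfy_extras node modules first, then any user custom nodes.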
def init_custom_nodes():
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_rebatch.py"))
    load_custom_nodes()