import torch

import os
import sys
import json
import hashlib
import traceback
import math
import time

from PIL import Image, ImageOps
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths
import latent_preview

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
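        # Tokenize the prompt and run it through the CLIP text model; the pooled output is kept alongside the per-token embeddings.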
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        return ([[cond, {"pooled_output": pooled}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []
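        # Blend each conditioning_to entry toward the first conditioning_from entry, zero-padding the shorter sequence so the token dimensions match.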

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)
        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class ConditioningZeroOut:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        c = []
        for t in conditioning:
            d = t[1].copy()
            if "pooled_output" in d:
                d["pooled_output"] = torch.zeros_like(d["pooled_output"])
            n = [torch.zeros_like(t[0]), d]
            c.append(n)
        return (c, )

class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
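        # Crop both spatial dimensions down to multiples of 8 (the VAE downscale factor), trimming evenly from each side.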
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
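        # Neutralize the masked region before encoding: shift pixels to be zero-centered, zero them where the mask is, then shift back so they end up 0.5 grey.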
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

class SaveLatent:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ),
                              "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = {"prompt": prompt_info}
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"
        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return {}


class LoadLatent:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
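        # Latent files saved before the latent_format_version_0 marker existed use a different scale, so multiply them by 1/0.18215 to match the current format.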
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
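        # Re-use the cached LoRA if the same file was loaded last time; otherwise drop the cache and load it from disk.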
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                lora = self.loaded_lora[1]
            else:
                del self.loaded_lora

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        if strength == 0:
            return (conditioning, )

        c = []
        control_hint = image.movedim(-1,1)
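        # Give every conditioning entry its own copy of the ControlNet with this image as the hint, chaining onto any ControlNet already attached.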
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class DualCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name1": (folder_paths.get_filename_list("clip"), ), "clip_name2": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        if strength == 0:
            return (conditioning, )

        c = []
        for t in conditioning:
            o = t[1].copy()
            x = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            if "unclip_conditioning" in o:
                o["unclip_conditioning"] = o["unclip_conditioning"][:] + [x]
            else:
                o["unclip_conditioning"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
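        # Attach the pooled text embedding together with its bounding box in latent coordinates (pixels // 8), appending to any boxes already present.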
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
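        # Slice out 'length' latents starting at batch_index, carrying along any noise mask and recording which batch indices the slice came from.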
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentUpscaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x =  x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
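        # Paste samples_from into samples_to at (x, y); with feather > 0, interior edges are blended with a linear ramp mask to soften the seam.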
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x =  x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
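    # Shared sampling helper: build the starting noise (or zeros when noise is disabled), pick up an optional inpainting noise mask, wire a latent previewer into the progress callback, then run comfy.sample.sample on the latents.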
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    preview_format = "JPEG"
    if preview_format not in ["JPEG", "PNG"]:
        preview_format = "JPEG"

    previewer = latent_preview.get_previewer(device, model.model.latent_format)

    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        preview_bytes = None
        if previewer:
            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
        pbar.update_absolute(step + 1, total_steps, preview_bytes)

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed)
    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"
    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
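            # Embed the prompt and any extra workflow info as PNG text chunks before saving.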
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
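        # Use the inverted alpha channel as the mask when the image has one; otherwise return an empty 64x64 mask.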
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)
    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()
    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), ),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"
1174
1175
1176
1177

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
1178
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        i = ImageOps.exif_transpose(i)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageScaleBy:
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        samples = image.movedim(-1,1)
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        s = s.movedim(1,-1)
        return (s,)
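    # Images flow through the graph as [B, H, W, C] tensors, while
    # comfy.utils.common_upscale works on [B, C, H, W]; hence the movedim(-1, 1)
    # before scaling and the movedim(1, -1) afterwards (ImageScale does the same).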

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)
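    # The returned MASK is 1.0 over the newly added border, 0.0 deep inside the
    # original image, and ramps quadratically over `feathering` pixels at the seam,
    # so a subsequent inpainting pass can blend the outpainted region smoothly.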


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
}
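# NODE_CLASS_MAPPINGS maps the node type identifiers stored in workflows to the
# classes that implement them; load_custom_node() below merges the mappings exported
# by custom node packages into this dict (and into NODE_DISPLAY_NAME_MAPPINGS) at startup.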

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

def load_custom_node(module_path):
    import importlib.util  # needed for spec_from_file_location; a bare "import importlib" does not always expose importlib.util
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)
        return False
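
# Illustrative sketch (hypothetical names) of the smallest module load_custom_node()
# can import from a custom_nodes folder -- either a single .py file or a package with
# an __init__.py. NODE_CLASS_MAPPINGS is required; NODE_DISPLAY_NAME_MAPPINGS is optional:
#
#     class ExampleInvert:
#         @classmethod
#         def INPUT_TYPES(s):
#             return {"required": {"image": ("IMAGE",)}}
#         RETURN_TYPES = ("IMAGE",)
#         FUNCTION = "run"
#         CATEGORY = "example"
#
#         def run(self, image):
#             return (1.0 - image,)
#
#     NODE_CLASS_MAPPINGS = {"ExampleInvert": ExampleInvert}
#     NODE_DISPLAY_NAME_MAPPINGS = {"ExampleInvert": "Example Invert"}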

def load_custom_nodes():
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path)
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        print("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

def init_custom_nodes():
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    load_custom_node(os.path.join(extras_dir, "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_upscale_model.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_post_processing.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_mask.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_rebatch.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_model_merging.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_tomesd.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_clip_sdxl.py"))
    load_custom_nodes()