import torch

import os
import sys
import json
import hashlib
import traceback
import math

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_convert
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192
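
# The CONDITIONING value passed between nodes is a list of
# [cond_tensor, options_dict] pairs; the conditioning nodes below copy the
# dict and attach extra keys to it ("area", "mask", "control", "gligen", "adm", ...).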

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                              "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []

        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        cond_from = conditioning_from[0][0]

        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            t0 = cond_from[:,:t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)

            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)
        return (out, )
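
# Illustrative sketch (not used by any node): addWeighted zero-pads the
# shorter prompt along the token axis so conds of different lengths can be
# blended. A hypothetical standalone version:
def _example_conditioning_average(t1, t0, to_strength):
    # t1, t0: cond tensors of shape [1, tokens, channels]
    t0 = t0[:, :t1.shape[1]]
    if t0.shape[1] < t1.shape[1]:
        pad = torch.zeros((1, t1.shape[1] - t0.shape[1], t1.shape[2]))
        t0 = torch.cat([t0, pad], dim=1)
    return t1 * to_strength + t0 * (1.0 - to_strength)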

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
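            # 'area' is stored as (height, width, y, x) in latent coordinates
            # (pixel values // 8), which is why the widgets above step by 8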
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "mask": ("MASK", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels
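
    # Note: vae_encode_crop_pixels center-crops both spatial dims down to the
    # nearest multiple of 8, since the VAE downsamples by a factor of 8; e.g.
    # a [1, 517, 515, 3] image tensor comes back as [1, 512, 512, 3].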

    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples":t}, )

class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths += next(os.walk(search_path))[1]
        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths = next(os.walk(search_path))[1]
                if model_path in paths:
                    model_path = os.path.join(search_path, model_path)
                    break

        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
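            # if a ControlNet is already attached, chain it behind this one so
            # several ControlNetApply nodes can stack on the same conditioning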
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
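            # "adm" accumulates one tuple per unCLIPConditioning node, letting
            # several image prompts be layered onto the same conditioning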
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
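            # boxes accumulate: keep any previously applied GLIGEN boxes and
            # append this one, coordinates in latent units (pixels // 8)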
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s["samples"] = s_in[batch_index:batch_index + length].clone()
        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                s["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < s_in.shape[0]:
                    masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
                s["noise_mask"] = masks[batch_index:batch_index + length].clone()
        if "batch_index" not in s:
            s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
        else:
            s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
        return (s,)
    
class RepeatLatentBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        s = samples.copy()
        s_in = samples["samples"]
        
        s["samples"] = s_in.repeat((amount, 1,1,1))
        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < s_in.shape[0]:
                masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
            s["noise_mask"] = samples["noise_mask"].repeat((amount, 1,1,1))
        if "batch_index" in s:
            offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
            s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
        return (s,)
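
# Note: 'batch_index' records which seed slot each latent occupies, so
# common_ksampler below can regenerate the same per-image noise after latents
# are pulled out of a batch (LatentFromBatch) or repeated (RepeatLatentBatch).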

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
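
# Illustrative sketch (not used by any node): on each edge that touches
# existing latent content, the feather loop scales the blend mask by a linear
# ramp. A hypothetical 1-D version of that ramp:
def _example_feather_ramp(feather):
    # e.g. feather=4 -> tensor([0.2500, 0.5000, 0.7500, 1.0000])
    return torch.tensor([(t + 1) / feather for t in range(feather)])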

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_inds = latent["batch_index"] if "batch_index" in latent else None
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        pbar.update_absolute(step + 1, total_steps)

    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)
    out = latent.copy()
    out["samples"] = samples
    return (out, )
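
# Note: both sampler nodes below funnel into common_ksampler; KSampler leaves
# start_step/last_step at None (full schedule), while KSamplerAdvanced exposes
# them along with the add_noise and leftover-noise toggles.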

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except ValueError:
                digits = 0
            return (digits, prefix)
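
        # e.g. with the default prefix, map_filename("ComfyUI_00012_.png")
        # returns (12, "ComfyUI_"), so numbering resumes from the highest
        # counter already in the folder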

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)
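
    # IMAGE tensors are [batch, height, width, channels] floats in 0..1; the
    # MASK is [height, width], with alpha inverted so opaque pixels become 0.0.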

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), ),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)

        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)

        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t
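
        # v above ramps linearly from 1.0 at the new border to 0.0 at
        # 'feathering' pixels into the original image; squaring it (v * v)
        # eases the transition so the outpaint seam is less visible.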

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage ": ConditioningAverage ,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,
}

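# Human-readable labels for the node type names above, shown in the UI.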
NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentComposite": "Latent Composite",
    "LatentFromBatch" : "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

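
# Load a single custom node module, either a standalone .py file or a package
# directory with an __init__.py, and merge the mappings it exports.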
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
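    # Each configured custom_nodes folder may contain single-file nodes (.py)
    # or package directories; anything else is skipped.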
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            load_custom_node(module_path)

def init_custom_nodes():
    load_custom_nodes()
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    load_custom_node(os.path.join(extras_dir, "nodes_hypernetwork.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_upscale_model.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_post_processing.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_mask.py"))
    load_custom_node(os.path.join(extras_dir, "nodes_rebatch.py"))