import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib.util

import folder_paths

def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
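        #conditioning is a list of [cond_tensor, options] pairs; the empty dict is filled
        #in later by nodes like ConditioningSetArea and ControlNetApply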
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
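        #the area is stored in latent units (1 latent unit = 8 pixels) as (height, width, y, x);
        #strength weights this conditioning and min_sigma/max_sigma bound the noise levels it applies to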
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
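        #pixels are (batch, height, width, channels) in 0..1; crop to a multiple of
        #64 pixels (8 latent units) and drop any alpha channel before encoding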
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )

class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
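        #scale the mask to the image, blank the masked pixels to mid-gray so the VAE does not
        #encode the original content there, and return a dilated mask as the latent noise_mask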
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
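        #work on a clone so callers keep the original CLIP; negative values count
        #transformer layers from the end ("clip skip")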
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
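            #each conditioning entry gets its own copy of the control net; if one is already
            #attached it is chained via set_previous_controlnet so multiple ControlNets stack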
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy_extras.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/style_model"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
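        #latents are 4-channel and 1/8 of the requested pixel resolution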
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
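            #build a linear ramp over `feather` latent rows/columns on each edge that borders
            #existing content, then blend the pasted latent with the destination below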
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

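    #noise is generated on the CPU with a seeded generator so results are reproducible
    #regardless of which GPU backend runs the sampling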
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

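    #an optional inpaint mask is scaled to the latent resolution and broadcast over
    #the channel and batch dimensions before sampling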
    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
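    #move the conditioning tensors to the device, repeating them to match the noise batch
    #size, and collect any attached ControlNets so their models can be loaded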
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #only the built-in KSampler samplers are implemented, so fail loudly instead of
        #leaving `sampler` unbound
        raise ValueError("unknown sampler: {}".format(sampler_name))

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
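        #images are written as <prefix>_<counter:05>_.png; map_filename parses an existing
        #file name back into (counter, prefix) so the next free counter can be found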
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except ValueError:
                digits = 0
            return (digits, prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.realpath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, optimize=True)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        if not os.path.exists(s.input_dir):
            os.makedirs(s.input_dir)
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
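        #the mask is the inverted alpha channel (1.0 marks pixels to regenerate);
        #images without an alpha channel get an all-zero placeholder mask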
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

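    #hash the file contents so the node re-executes whenever the image on disk changes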
    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
}

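#import a custom node module: a lone .py file is loaded directly, a package directory via its
#__init__.py; any NODE_CLASS_MAPPINGS it defines are merged into the global node registry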
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

load_custom_nodes()

load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))