import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
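# The path insert above makes the bundled "comfy" package importable without
# installing it as a dependency.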


import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib.util

import folder_paths

def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )
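
# A CONDITIONING value is a list of [cond_tensor, options_dict] pairs; the
# nodes below copy the dict and attach extra keys such as 'area', 'strength'
# and 'control' that are read at sampling time.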

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )

class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    embedding_directory = os.path.join(models_dir, "embeddings")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=self.embedding_directory)

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)
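
# stop_at_clip_layer is the "CLIP skip" setting: -1 uses the final hidden
# layer, -2 stops one layer earlier, and so on.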

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)

class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )
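
# Because each application stores any previous 'control' entry via
# set_previous_controlnet, chaining several ControlNetApply nodes stacks
# multiple hints onto a single sampling pass.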

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy_extras.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning/style_model"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
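
# Worked example: feather=32 becomes 4 latent rows/columns, so interior edges
# of the pasted region ramp through 0.25, 0.5, 0.75, 1.0 while edges flush
# with the border of samples_to keep full strength.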

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = model_management.get_torch_device()

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")
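    # Noise is generated on the CPU with a seeded generator so a given seed
    # reproduces the same result across devices; it is moved to the sampling
    # device further down.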

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
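
# start_at_step/end_at_step let one denoising schedule be split across several
# KSamplerAdvanced nodes: end an early pass with leftover noise enabled, then
# continue from the same step in the next node with add_noise disabled.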

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
        self.url_suffix = ""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True
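    # OUTPUT_NODE marks this as a workflow endpoint: it is executed even though
    # it returns nothing for downstream nodes to consume.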

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        def map_filename(filename):
            prefix_len = len(filename_prefix)
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)
        try:
            counter = max(filter(lambda a: a[1][:-1] == filename_prefix and a[1][-1] == "_", map(map_filename, os.listdir(self.output_dir))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.mkdir(self.output_dir)
            counter = 1

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        paths = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
            file = f"{filename_prefix}_{counter:05}_.png"
            img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True)
            paths.append(file + self.url_suffix)
            counter += 1
        return { "ui": { "images": paths } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
        self.url_suffix = "?type=temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        if not os.path.exists(s.input_dir):
            os.makedirs(s.input_dir)
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)
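
    # The alpha channel is inverted above so transparent pixels map to mask 1.0
    # (regions to repaint when used as an inpaint mask) and opaque pixels to 0.0.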
    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
927
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
}

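# A minimal sketch of a custom node (a hypothetical example, deliberately NOT
# registered above): any class exposing INPUT_TYPES/RETURN_TYPES/FUNCTION in
# this shape can be added to NODE_CLASS_MAPPINGS, either directly or from a
# module in custom_nodes/ picked up by load_custom_nodes() below.
class ExampleImageBrightness:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",),
                             "factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "adjust"

    CATEGORY = "image"

    def adjust(self, image, factor):
        # IMAGE tensors are float NHWC in [0, 1], so multiply and clamp suffices.
        return (torch.clamp(image * factor, 0.0, 1.0),)
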
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
        load_custom_node(module_path)

load_custom_nodes()

load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
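
# A minimal sketch of driving these node classes directly from Python
# (illustrative only: the checkpoint filename is hypothetical and the sampler
# and scheduler names must exist in comfy.samplers.KSampler.SAMPLERS and
# SCHEDULERS):
#
#   model, clip, vae = CheckpointLoaderSimple().load_checkpoint("v1-5-pruned-emaonly.ckpt")
#   positive = CLIPTextEncode().encode(clip, "a photo of a cat")[0]
#   negative = CLIPTextEncode().encode(clip, "")[0]
#   latent = EmptyLatentImage().generate(512, 512)[0]
#   latent = KSampler().sample(model, 8888, 20, 8.0, "euler", "normal", positive, negative, latent)[0]
#   images = VAEDecode().decode(vae, latent)[0]
#   SaveImage().save_images(images)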