import torch

import os
import sys
import json
import hashlib
import copy

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

sys.path.insert(0, os.path.join(sys.path[0], "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import model_management
import importlib.util

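#ckpt extensions are for full checkpoints; the pt list is reused by the VAE, LoRA, CLIP and controlnet loaders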
supported_ckpt_extensions = ['.ckpt', '.pth']
supported_pt_extensions = ['.ckpt', '.pt', '.bin', '.pth']
try:
    import safetensors.torch
    supported_ckpt_extensions += ['.safetensors']
    supported_pt_extensions += ['.safetensors']
except ImportError:
    print("Could not import safetensors, safetensors support disabled.")

def recursive_search(directory):
    result = []
    for root, subdirs, files in os.walk(directory, followlinks=True):
        for filepath in files:
            #we os.path.join directory with a blank string to generate a path separator at the end,
            #then strip that prefix so the returned paths are relative to the search directory
            result.append(os.path.join(root, filepath).replace(os.path.join(directory, ''), ''))
    return result

def filter_files_extensions(files, extensions):
    return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
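        #CONDITIONING is a list of [cond_tensor, options_dict] pairs; the empty dict means no extra constraints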
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
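        #pixel-space width/height/x/y are divided by 8 below because latents are 1/8 the image resolution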
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
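        #crop to a multiple of 64 pixels so the 8x-downscaled latent keeps dimensions the model can handle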
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
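        #only the first 3 channels are encoded; an alpha channel, if present, is dropped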
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )

class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #shave off a few pixels to keep things seamless
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((1.0 - mask.round())[None], kernel_tensor, padding=3), 0, 1)
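        #pixels are centered on 0.5, the eroded mask zeroes out the region to be inpainted, then they are shifted back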
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= mask_erosion[0][:x,:y].round()
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": mask}, )

class CheckpointLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    config_dir = os.path.join(models_dir, "configs")
    ckpt_dir = os.path.join(models_dir, "checkpoints")
    embedding_directory = os.path.join(models_dir, "embeddings")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (filter_files_extensions(recursive_search(s.config_dir), '.yaml'), ),
                              "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = os.path.join(self.config_dir, config_name)
        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=self.embedding_directory)

class LoraLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    lora_dir = os.path.join(models_dir, "loras")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (filter_files_extensions(recursive_search(s.lora_dir), supported_pt_extensions), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = os.path.join(self.lora_dir, lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    vae_dir = os.path.join(models_dir, "vae")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (filter_files_extensions(recursive_search(s.vae_dir), supported_pt_extensions), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = os.path.join(self.vae_dir, vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['control'] = control_net.copy().set_cond_hint(control_hint, strength)
            c.append(n)
        return (c, )


class CLIPLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    clip_dir = os.path.join(models_dir, "clip")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions), ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name, stop_at_clip_layer):
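        #stop_at_clip_layer implements "clip skip": -1 uses the final layer, -2 stops one layer earlier, etc.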
        clip_path = os.path.join(self.clip_dir, clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
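        #SD latents have 4 channels at 1/8 the pixel resolution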
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

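        #rotate k quarter-turns in the latent's spatial (height, width) plane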
        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
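            #build a linear 0-to-1 ramp over `feather` latent pixels on each edge that borders existing content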
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


def common_ksampler(device, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
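    #shared sampling path used by both KSampler and KSamplerAdvanced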
    latent_image = latent["samples"]
    noise_mask = None

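    #noise is created on the CPU with a seeded generator so seeds reproduce across devices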
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    if device != "cpu":
        model_management.load_model_gpu(model)
        real_model = model.model
    else:
        #TODO: cpu support
        real_model = model.patch_model()
    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    model_management.load_controlnet_gpu(list(map(lambda a: a.control_model, control_nets)))

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    def __init__(self, device="cuda"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(self.device, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    def __init__(self, device="cuda"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(self.device, model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
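        #filenames look like "<prefix>_00001_.png"; parse existing ones so numbering continues from the highest counter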
        def map_filename(filename):
            prefix_len = len(filename_prefix)
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)
        try:
            counter = max(filter(lambda a: a[1][:-1] == filename_prefix and a[1][-1] == "_", map(map_filename, os.listdir(self.output_dir))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.mkdir(self.output_dir)
            counter = 1
        for image in images:
            i = 255. * image.cpu().numpy()
            #clip before the uint8 cast so out-of-range values saturate instead of wrapping around
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
            img.save(os.path.join(self.output_dir, f"{filename_prefix}_{counter:05}_.png"), pnginfo=metadata, optimize=True)
            counter += 1

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        return (image,)

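    #hash the file contents so cached outputs are invalidated when the image changes on disk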
    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (os.listdir(s.input_dir), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image"

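    #images are stored NHWC; movedim converts to NCHW for resizing, then back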
    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CLIPTextEncode": CLIPTextEncode,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
}

CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
def load_custom_nodes():
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    #remove these entries from the tuple if you want the bundled examples to load
    for skipped in ("example.py", "example_folder", "__pycache__"):
        try:
            possible_modules.remove(skipped)
        except ValueError:
            pass
    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
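        #a custom node is a single .py file or a folder containing main.py; it must expose NODE_CLASS_MAPPINGS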
        try:
            if os.path.isfile(module_path):
                module_spec = importlib.util.spec_from_file_location(os.path.basename(module_path), module_path)
            else:
                module_spec = importlib.util.spec_from_file_location(module_path, os.path.join(module_path, "main.py"))
            module = importlib.util.module_from_spec(module_spec)
            module_spec.loader.exec_module(module)
            if getattr(module, "NODE_CLASS_MAPPINGS", None) is not None:
                NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            else:
                print(f"Skipping {possible_module} module for custom nodes: it has no NODE_CLASS_MAPPINGS.")
        except Exception as e:
            print(f"Cannot import {possible_module} module for custom nodes.")
            print(e)

load_custom_nodes()