import torch

import os
import sys
import json
import hashlib
import copy
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np

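#make the bundled comfy/ directory importable so its modules (e.g. model_management) resolve as top-level imports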
sys.path.insert(0, os.path.join(sys.path[0], "comfy"))


import comfy.samplers
import comfy.sd
import comfy.utils

import model_management
import importlib.util

supported_ckpt_extensions = ['.ckpt', '.pth']
supported_pt_extensions = ['.ckpt', '.pt', '.bin', '.pth']
try:
    import safetensors.torch
    supported_ckpt_extensions += ['.safetensors']
    supported_pt_extensions += ['.safetensors']
except ImportError:
    print("Could not import safetensors, safetensors support disabled.")

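#walks a directory (following symlinks) and returns every file path relative to that directory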
def recursive_search(directory):
    result = []
    for root, subdir, file in os.walk(directory, followlinks=True):
        for filepath in file:
            #we os.path.join directory with a blank string to generate a path separator at the end.
            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
    return result

def filter_files_extensions(files, extensions):
    return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))

class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True, "dynamic_prompt": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
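            #areas are stored in latent coordinates: 1 latent cell = 8 pixels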
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
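        #crop the image to a multiple of 64 pixels so the latent is a whole number of cells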
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )

class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:x,:y]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
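        #convolving the rounded mask with an all-ones kernel dilates (grows) it, despite the variable name below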
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
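        #set the pixels under the mask to neutral 0.5 gray before encoding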
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )

class CheckpointLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    config_dir = os.path.join(models_dir, "configs")
    ckpt_dir = os.path.join(models_dir, "checkpoints")
    embedding_directory = os.path.join(models_dir, "embeddings")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (filter_files_extensions(recursive_search(s.config_dir), '.yaml'), ),
                              "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = os.path.join(self.config_dir, config_name)
        ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=self.embedding_directory)

class LoraLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    lora_dir = os.path.join(models_dir, "loras")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (filter_files_extensions(recursive_search(s.lora_dir), supported_pt_extensions), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = os.path.join(self.lora_dir, lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class VAELoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    vae_dir = os.path.join(models_dir, "vae")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (filter_files_extensions(recursive_search(s.vae_dir), supported_pt_extensions), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = os.path.join(self.vae_dir, vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    controlnet_dir = os.path.join(models_dir, "controlnet")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
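        #attach a copy of the controlnet to every conditioning entry, chaining onto any controlnet already set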
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class T2IAdapterLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    t2i_adapter_dir = os.path.join(models_dir, "t2i_adapter")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "t2i_adapter_name": (filter_files_extensions(recursive_search(s.t2i_adapter_dir), supported_pt_extensions), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_t2i_adapter"

    CATEGORY = "loaders"

    def load_t2i_adapter(self, t2i_adapter_name):
        t2i_path = os.path.join(self.t2i_adapter_dir, t2i_adapter_name)
        t2i_adapter = comfy.sd.load_t2i_adapter(t2i_path)
        return (t2i_adapter,)

class CLIPLoader:
    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
    clip_dir = os.path.join(models_dir, "clip")
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir), supported_pt_extensions), ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name, stop_at_clip_layer):
        clip_path = os.path.join(self.clip_dir, clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
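        #stop_at_clip_layer counts from the end: -1 keeps every layer, -2 skips the last one ("clip skip")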
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
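        #latents have 4 channels at 1/8 of the pixel resolution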
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
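            #build a linear ramp over the feathered edges; edges flush with the canvas border stay at full strength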
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


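#shared sampling helper: prepares noise (and an optional noise mask), batches the conditioning, loads any controlnets, then runs the KSampler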
def common_ksampler(device, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)

    real_model = None
    if device != "cpu":
        model_management.load_model_gpu(model)
        real_model = model.model
    else:
        #TODO: cpu support
        real_model = model.patch_model()
    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in p[1]:
            control_nets += [p[1]['control']]
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        if 'control' in n[1]:
            control_nets += [n[1]['control']]
        negative_copy += [[t] + n[1:]]

    control_net_models = []
    for x in control_nets:
        control_net_models += x.get_control_models()
    model_management.load_controlnet_gpu(control_net_models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for c in control_nets:
        c.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    def __init__(self, device="cuda"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(self.device, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    def __init__(self, device="cuda"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(self.device, model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
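        #scan existing files for the highest "<prefix>_<number>_" counter so new images do not overwrite old ones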
        def map_filename(filename):
            prefix_len = len(filename_prefix)
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)
        try:
            counter = max(filter(lambda a: a[1][:-1] == filename_prefix and a[1][-1] == "_", map(map_filename, os.listdir(self.output_dir))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.mkdir(self.output_dir)
            counter = 1

        paths = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
            file = f"{filename_prefix}_{counter:05}_.png"
            img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True)
            paths.append(file)
            counter += 1
        return { "ui": { "images": paths } }

class LoadImage:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "load_image"
    def load_image(self, image):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        return (image,)

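    #hash the image file so the node is re-executed whenever the file on disk changes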
    @classmethod
    def IS_CHANGED(s, image):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"image": (sorted(os.listdir(s.input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        image_path = os.path.join(self.input_dir, image)
        i = Image.open(image_path)
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
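            #alpha is inverted: fully transparent pixels become 1.0 in the mask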
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = os.path.join(s.input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image"

    def upscale(self, image, upscale_method, width, height, crop):
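        #images are NHWC; common_upscale expects NCHW, so move the channel dim and back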
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoader": CheckpointLoader,
    "CLIPTextEncode": CLIPTextEncode,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "SaveImage": SaveImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "T2IAdapterLoader": T2IAdapterLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
}

CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
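#custom nodes can be single .py files or packages with an __init__.py; each must expose NODE_CLASS_MAPPINGS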
def load_custom_nodes():
    possible_modules = os.listdir(CUSTOM_NODE_PATH)
    if "__pycache__" in possible_modules:
        possible_modules.remove("__pycache__")

    for possible_module in possible_modules:
        module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
        if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue

        module_name = possible_module
        try:
            if os.path.isfile(module_path):
                module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            else:
                module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module = importlib.util.module_from_spec(module_spec)
            sys.modules[module_name] = module
            module_spec.loader.exec_module(module)
            if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
                NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            else:
                print(f"Skip {possible_module} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
        except Exception as e:
            print(traceback.format_exc())
            print(f"Cannot import {possible_module} module for custom nodes:", e)

load_custom_nodes()