"...git@developer.sourcefind.cn:chenpangpang/open-webui.git" did not exist on "a382e82dec1bf2631303f72490d5b716272f944e"
import torch

import os
import sys
import json
import hashlib
import traceback

from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np


sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))


import comfy.diffusers_convert
import comfy.samplers
import comfy.sd
import comfy.utils

import comfy.clip_vision

import comfy.model_management
import importlib

import folder_paths

def before_node_execution():
    comfy.model_management.throw_exception_if_processing_interrupted()

def interrupt_processing(value=True):
    comfy.model_management.interrupt_current_processing(value)

MAX_RESOLUTION=8192

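# Encode a text prompt with the supplied CLIP model; the result is CONDITIONING,
# a list of [cond_tensor, options] pairs consumed by the samplers below.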
class CLIPTextEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip, text):
        return ([[clip.encode(text), {}]], )

class ConditioningCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

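# Restrict conditioning to a rectangular region; pixel coordinates are stored as
# latent coordinates (divided by 8) together with a blend strength.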
class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                              "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0):
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

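# Decode latent samples back into pixel-space images using the loaded VAE.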
class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "latent"

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )

class VAEDecodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples):
        return (vae.decode_tiled(samples["samples"]), )

class VAEEncode:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode(pixels[:,:,:,:3])

        return ({"samples":t}, )


class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
        t = vae.encode_tiled(pixels[:,:,:,:3])

        return ({"samples":t}, )
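# Encode an image for inpainting: the mask is resized to the image, the masked pixels
# are neutralized to mid-gray before encoding, and a slightly grown copy of the mask is
# returned as "noise_mask" so only the masked area is re-noised during sampling.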
class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0).unsqueeze(0)
        elif len(mask.shape) < 4:
            mask = mask.unsqueeze(1)
        mask = torch.nn.functional.interpolate(mask, size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:,:x,:y,:]
            mask = mask[:,:x,:y,:]

        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)

        return ({"samples":t, "noise_mask": (mask_erosion[:,:x,:y,:].round())}, )

class CheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
                              "ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))

class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class DiffusersLoader:
    @classmethod
    def INPUT_TYPES(cls):
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths += next(os.walk(search_path))[1]
        return {"required": {"model_path": (paths,), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                paths = next(os.walk(search_path))[1]
                if model_path in paths:
                    model_path = os.path.join(search_path, model_path)
                    break

        return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))


class unCLIPCheckpointLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out

class CLIPSetLastLayer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip": ("CLIP", ),
                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        clip = clip.clone()
        clip.clip_layer(stop_at_clip_layer)
        return (clip,)

class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "clip": ("CLIP", ),
                              "lora_name": (folder_paths.get_filename_list("loras"), ),
                              "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL", "CLIP")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        lora_path = folder_paths.get_full_path("loras", lora_name)
        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
        return (model_lora, clip_lora)

class TomePatchModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, ratio):
        m = model.clone()
        m.set_model_tomesd(ratio)
        return (m, )

class VAELoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?
    def load_vae(self, vae_name):
        vae_path = folder_paths.get_full_path("vae", vae_name)
        vae = comfy.sd.VAE(ckpt_path=vae_path)
        return (vae,)

class ControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path)
        return (controlnet,)

class DiffControlNetLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
        return (controlnet,)


class ControlNetApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "control_net": ("CONTROL_NET", ),
                             "image": ("IMAGE", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        c = []
        control_hint = image.movedim(-1,1)
        print(control_hint.shape)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            c_net = control_net.copy().set_cond_hint(control_hint, strength)
            if 'control' in t[1]:
                c_net.set_previous_controlnet(t[1]['control'])
            n[1]['control'] = c_net
            c.append(n)
        return (c, )

class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return (clip,)

class CLIPVisionLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
                             }}
    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        clip_path = folder_paths.get_full_path("clip_vision", clip_name)
        clip_vision = comfy.clip_vision.load(clip_path)
        return (clip_vision,)

class CLIPVisionEncode:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        output = clip_vision.encode_image(image)
        return (output,)

class StyleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        style_model_path = folder_paths.get_full_path("style_models", style_model_name)
        style_model = comfy.sd.load_style_model(style_model_path)
        return (style_model,)


class StyleModelApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "style_model": ("STYLE_MODEL", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        cond = style_model.get_cond(clip_vision_output)
        c = []
        for t in conditioning:
            n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
            c.append(n)
        return (c, )

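# Attach a CLIP vision embedding (with a strength and noise augmentation level) to the
# conditioning so unCLIP checkpoints can be guided by an image.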
class unCLIPConditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        c = []
        for t in conditioning:
            o = t[1].copy()
            x = (clip_vision_output, strength, noise_augmentation)
            if "adm" in o:
                o["adm"] = o["adm"][:] + [x]
            else:
                o["adm"] = [x]
            n = [t[0], o]
            c.append(n)
        return (c, )

class GLIGENLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        gligen_path = folder_paths.get_full_path("gligen", gligen_name)
        gligen = comfy.sd.load_gligen(gligen_path)
        return (gligen,)

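# Add a GLIGEN "position" entry to the conditioning: the pooled text encoding plus a
# bounding box, stored in latent units (pixel values divided by 8).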
class GLIGENTextBoxApply:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                              "clip": ("CLIP", ),
                              "gligen_textbox_model": ("GLIGEN", ),
                              "text": ("STRING", {"multiline": True}),
                              "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        c = []
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
        for t in conditioning_to:
            n = [t[0], t[1].copy()]
            position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]

            n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
            c.append(n)
        return (c, )

class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent"

    def generate(self, width, height, batch_size=1):
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples":latent}, )


class LatentFromBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent"

    def rotate(self, samples, batch_index):
        s = samples.copy()
        s_in = samples["samples"]
        batch_index = min(s_in.shape[0] - 1, batch_index)
        s["samples"] = s_in[batch_index:batch_index + 1].clone()
        s["batch_index"] = batch_index
        return (s,)

class LatentUpscale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        s = samples.copy()
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
        return (s,)

class LatentRotate:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        s = samples.copy()
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
        return (s,)

class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        s = samples.copy()
        if flip_method.startswith("x"):
            s["samples"] = torch.flip(samples["samples"], dims=[2])
        elif flip_method.startswith("y"):
            s["samples"] = torch.flip(samples["samples"], dims=[3])

        return (s,)

class LatentComposite:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples_to": ("LATENT",),
                              "samples_from": ("LATENT",),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        x = x // 8
        y = y // 8
        feather = feather // 8
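        # Paste samples_from into samples_to at (x, y); when feather > 0 a linear ramp
        # mask blends the pasted edges over `feather` latent pixels.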
        samples_out = samples_to.copy()
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))

                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
                if x != 0:
                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
            rev_mask = torch.ones_like(mask) - mask
            s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)

class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                              "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        s = samples.copy()
        samples = samples['samples']
        x = x // 8
        y = y // 8

        #enforce minimum size of 64
        if x > (samples.shape[3] - 8):
            x = samples.shape[3] - 8
        if y > (samples.shape[2] - 8):
            y = samples.shape[2] - 8

        new_height = height // 8
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)

        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:,:,y:to_y, x:to_x]
        return (s,)

class SetLatentNoiseMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "mask": ("MASK",),
                              }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        s = samples.copy()
        s["noise_mask"] = mask
        return (s,)


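# Shared sampling helper used by KSampler and KSamplerAdvanced: prepares the noise
# (respecting an optional batch_index and noise mask), loads the model and any
# control/GLIGEN models, runs the selected sampler and returns the resulting latents.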
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = comfy.model_management.get_torch_device()

    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_index = 0
        if "batch_index" in latent:
            batch_index = latent["batch_index"]

        generator = torch.manual_seed(seed)
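        # Advance the generator batch_index times so a latent selected with
        # LatentFromBatch gets the same noise it would have received inside the full batch.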
        for i in range(batch_index):
            noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")

    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        if len(noise_mask.shape) < 3:
            noise_mask = noise_mask.unsqueeze(0).unsqueeze(0)
        elif len(noise_mask.shape) < 4:
            noise_mask = noise_mask.unsqueeze(1)
        noise_mask = torch.nn.functional.interpolate(noise_mask, size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        if noise_mask.shape[0] < latent_image.shape[0]:
            noise_mask = noise_mask.repeat(latent_image.shape[0] // noise_mask.shape[0], 1, 1, 1)
        noise_mask = noise_mask.to(device)

    real_model = None
    comfy.model_management.load_model_gpu(model)
    real_model = model.model

    noise = noise.to(device)
    latent_image = latent_image.to(device)

    positive_copy = []
    negative_copy = []

    control_nets = []
    def get_models(cond):
        models = []
        for c in cond:
            if 'control' in c[1]:
                models += [c[1]['control']]
            if 'gligen' in c[1]:
                models += [c[1]['gligen'][1]]
        return models

    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        negative_copy += [[t] + n[1:]]

    models = get_models(positive) + get_models(negative)
    comfy.model_management.load_controlnet_gpu(models)

    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
    else:
        #other samplers
        pass

    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for m in models:
        m.cleanup()

    out = latent.copy()
    out["samples"] = samples
    return (out, )

class KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class KSamplerAdvanced:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                    "add_noise": (["enable", "disable"], ),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "return_with_leftover_noise": (["disable", "enable"], ),
                    }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

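    # Map the UI toggles onto common_ksampler flags: disabling "add_noise" skips the
    # initial noising, and returning with leftover noise disables the forced full denoise.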
    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        force_full_denoise = True
        if return_with_leftover_noise == "enable":
            force_full_denoise = False
        disable_noise = False
        if add_noise == "disable":
            disable_noise = True
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)

class SaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(s):
        return {"required": 
                    {"images": ("IMAGE", ),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "image"

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
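        # map_filename splits an existing file name into (counter, prefix) so the highest
        # counter already used for this prefix can be found below.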
        def map_filename(filename):
            prefix_len = len(os.path.basename(filename_prefix))
            prefix = filename[:prefix_len + 1]
            try:
                digits = int(filename[prefix_len + 1:].split('_')[0])
            except:
                digits = 0
            return (digits, prefix)

        def compute_vars(input):
            input = input.replace("%width%", str(images[0].shape[1]))
            input = input.replace("%height%", str(images[0].shape[0]))
            return input

        filename_prefix = compute_vars(filename_prefix)

        subfolder = os.path.dirname(os.path.normpath(filename_prefix))
        filename = os.path.basename(os.path.normpath(filename_prefix))

        full_output_folder = os.path.join(self.output_dir, subfolder)

        if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir:
            print("Saving image outside the output folder is not allowed.")
            return {}

        try:
            counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
        except ValueError:
            counter = 1
        except FileNotFoundError:
            os.makedirs(full_output_folder, exist_ok=True)
            counter = 1

        results = list()
        for image in images:
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = PngInfo()
            if prompt is not None:
                metadata.add_text("prompt", json.dumps(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))

            file = f"{filename}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return { "ui": { "images": results } }

class PreviewImage(SaveImage):
    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

class LoadImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), )},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
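        # Use the inverted alpha channel as the mask; images without alpha get a small
        # all-zero placeholder mask.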
        if 'A' in i.getbands():
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (image, mask)

    @classmethod
    def IS_CHANGED(s, image):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class LoadImageMask:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), ),
                    "channel": (["alpha", "red", "green", "blue"], ),}
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        i = Image.open(image_path)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
        mask = None
        c = channel[0].upper()
        if c in i.getbands():
            mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
            mask = torch.from_numpy(mask)
            if c == 'A':
                mask = 1. - mask
        else:
            mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
        return (mask,)

    @classmethod
    def IS_CHANGED(s, image, channel):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                              "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                              "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        samples = image.movedim(-1,1)
        s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        s = s.movedim(1,-1)
        return (s,)

class ImageInvert:

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        s = 1.0 - image
        return (s,)


class ImagePadForOutpaint:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        d1, d2, d3, d4 = image.size()

        new_image = torch.zeros(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        )
        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros(
            (d2, d3),
            dtype=torch.float32
        )

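        # Feathering: fade the mask in quadratically with the distance from each original
        # pixel to the nearest padded edge.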
        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:

            for i in range(d2):
                for j in range(d3):
                    dt = i if top != 0 else d2
                    db = d2 - i if bottom != 0 else d2

                    dl = j if left != 0 else d3
                    dr = d3 - j if right != 0 else d3

                    d = min(dt, db, dl, dr)

                    if d >= feathering:
                        continue

                    v = (feathering - d) / feathering

                    t[i, j] = v * v

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)


NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentFromBatch": LatentFromBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "TomePatchModel": TomePatchModel,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint (With Config)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentComposite": "Latent Composite",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}

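# Import a custom node module (a single .py file or a package directory) and merge its
# NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS into the global tables.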
def load_custom_node(module_path):
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
        else:
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)
        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
        else:
            print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
    except Exception as e:
        print(traceback.format_exc())
        print(f"Cannot import {module_path} module for custom nodes:", e)

def load_custom_nodes():
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            load_custom_node(module_path)

def init_custom_nodes():
    load_custom_nodes()
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))