"examples/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "18e7de4f5dcbb25ad1eb58882c34517bb0fb4b79"
Commit fdf57325 authored by pythongosssss

Merge remote-tracking branch 'origin/master' into tiled-progress

parents 27df7410 93c64afa
@@ -13,6 +13,7 @@ a111:
     models/ESRGAN
     models/SwinIR
     embeddings: embeddings
+    hypernetworks: models/hypernetworks
     controlnet: models/ControlNet
 #other_ui:
...
@@ -69,6 +69,46 @@ def get_directory_by_type(type_name):
     return None

+# determine base_dir from the annotation if name is in 'filename.ext [annotation]' format,
+# otherwise leave it to the caller to use default_dir as base_dir
+def annotated_filepath(name):
+    if name.endswith("[output]"):
+        base_dir = get_output_directory()
+        name = name[:-9]
+    elif name.endswith("[input]"):
+        base_dir = get_input_directory()
+        name = name[:-8]
+    elif name.endswith("[temp]"):
+        base_dir = get_temp_directory()
+        name = name[:-7]
+    else:
+        return name, None
+
+    return name, base_dir
+
+
+def get_annotated_filepath(name, default_dir=None):
+    name, base_dir = annotated_filepath(name)
+
+    if base_dir is None:
+        if default_dir is not None:
+            base_dir = default_dir
+        else:
+            base_dir = get_input_directory()  # fallback path
+
+    return os.path.join(base_dir, name)
+
+
+def exists_annotated_filepath(name):
+    name, base_dir = annotated_filepath(name)
+
+    if base_dir is None:
+        base_dir = get_input_directory()  # fallback path
+
+    filepath = os.path.join(base_dir, name)
+    return os.path.exists(filepath)
+
+
 def add_model_folder_path(folder_name, full_folder_path):
     global folder_names_and_paths
     if folder_name in folder_names_and_paths:
...
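For orientation, the annotation scheme above resolves names like this (an illustrative sketch; photo.png is a hypothetical file):

# Assuming the functions above are imported from folder_paths:
name, base_dir = annotated_filepath("photo.png [output]")  # ("photo.png", <output dir>)
name, base_dir = annotated_filepath("photo.png")           # ("photo.png", None)
path = get_annotated_filepath("photo.png [temp]")          # <temp dir>/photo.png
path = get_annotated_filepath("photo.png")                 # falls back to the input directory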
@@ -5,6 +5,7 @@ import shutil
 import threading
 from comfy.cli_args import args
+import comfy.utils

 if os.name == "nt":
     import logging
@@ -39,14 +40,9 @@ async def run(server, address='', port=8188, verbose=True, call_on_start=None):
     await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())

 def hijack_progress(server):
-    from tqdm.auto import tqdm
-    orig_func = getattr(tqdm, "update")
-    def wrapped_func(*args, **kwargs):
-        pbar = args[0]
-        v = orig_func(*args, **kwargs)
-        server.send_sync("progress", { "value": pbar.n, "max": pbar.total}, server.client_id)
-        return v
-    setattr(tqdm, "update", wrapped_func)
+    def hook(value, total):
+        server.send_sync("progress", { "value": value, "max": total}, server.client_id)
+    comfy.utils.set_progress_bar_global_hook(hook)

 def cleanup_temp():
     temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
...
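The tqdm monkey-patch in hijack_progress is replaced by an explicit hook registered through comfy.utils. A minimal sketch of how the hook side plausibly fits together, assuming set_progress_bar_global_hook simply stores the callable that ProgressBar invokes on each update (the names are from the diff; the internals shown here are illustrative):

# Sketch of the hook mechanism; the real code lives in comfy/utils.py.
PROGRESS_BAR_HOOK = None

def set_progress_bar_global_hook(function):
    global PROGRESS_BAR_HOOK
    PROGRESS_BAR_HOOK = function

class ProgressBar:
    def __init__(self, total):
        self.total = total
        self.current = 0

    def update_absolute(self, value):
        # Forward the new absolute position to whatever hook the server registered.
        self.current = value
        if PROGRESS_BAR_HOOK is not None:
            PROGRESS_BAR_HOOK(self.current, self.total)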
@@ -5,6 +5,7 @@ import sys
 import json
 import hashlib
 import traceback
+import math

 from PIL import Image
 from PIL.PngImagePlugin import PngInfo
@@ -16,6 +17,7 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "co
 import comfy.diffusers_convert
 import comfy.samplers
+import comfy.sample
 import comfy.sd
 import comfy.utils
@@ -58,14 +60,44 @@ class ConditioningCombine:
     def combine(self, conditioning_1, conditioning_2):
         return (conditioning_1 + conditioning_2, )

+class ConditioningAverage :
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
+                             "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "addWeighted"
+
+    CATEGORY = "conditioning"
+
+    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
+        out = []
+
+        if len(conditioning_from) > 1:
+            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")
+
+        cond_from = conditioning_from[0][0]
+
+        for i in range(len(conditioning_to)):
+            t1 = conditioning_to[i][0]
+            t0 = cond_from[:,:t1.shape[1]]
+            if t0.shape[1] < t1.shape[1]:
+                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)
+
+            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
+            n = [tw, conditioning_to[i][1].copy()]
+            out.append(n)
+        return (out, )
+
 class ConditioningSetArea:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": {"conditioning": ("CONDITIONING", ),
-                             "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
-                             "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
-                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
-                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
+                             "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                             "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                              "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              }}
     RETURN_TYPES = ("CONDITIONING",)
@@ -79,11 +111,41 @@ class ConditioningSetArea:
             n = [t[0], t[1].copy()]
             n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
             n[1]['strength'] = strength
+            n[1]['set_area_to_bounds'] = False
             n[1]['min_sigma'] = min_sigma
             n[1]['max_sigma'] = max_sigma
             c.append(n)
         return (c, )

+class ConditioningSetMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning": ("CONDITIONING", ),
+                             "mask": ("MASK", ),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+                             "set_cond_area": (["default", "mask bounds"],),
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+
+    CATEGORY = "conditioning"
+
+    def append(self, conditioning, mask, set_cond_area, strength):
+        c = []
+        set_area_to_bounds = False
+        if set_cond_area != "default":
+            set_area_to_bounds = True
+        if len(mask.shape) < 3:
+            mask = mask.unsqueeze(0)
+        for t in conditioning:
+            n = [t[0], t[1].copy()]
+            _, h, w = mask.shape
+            n[1]['mask'] = mask
+            n[1]['set_area_to_bounds'] = set_area_to_bounds
+            n[1]['mask_strength'] = strength
+            c.append(n)
+        return (c, )
+
 class VAEDecode:
     def __init__(self, device="cpu"):
         self.device = device
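The new ConditioningAverage node is a plain linear interpolation between two conditioning tensors; when conditioning_from has fewer tokens it is zero-padded along the token axis first. The core math in isolation (shapes are illustrative):

import torch

t1 = torch.randn(1, 77, 768)                                # conditioning_to
t0 = torch.randn(1, 45, 768)                                # conditioning_from (shorter)
t0 = torch.cat([t0, torch.zeros(1, 77 - 45, 768)], dim=1)   # pad to the same token count
strength = 0.7                                              # conditioning_to_strength
tw = t1 * strength + t0 * (1.0 - strength)                  # result, shape (1, 77, 768)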
@@ -126,16 +188,21 @@ class VAEEncode:
     CATEGORY = "latent"

-    def encode(self, vae, pixels):
-        x = (pixels.shape[1] // 64) * 64
-        y = (pixels.shape[2] // 64) * 64
+    @staticmethod
+    def vae_encode_crop_pixels(pixels):
+        x = (pixels.shape[1] // 8) * 8
+        y = (pixels.shape[2] // 8) * 8
         if pixels.shape[1] != x or pixels.shape[2] != y:
-            pixels = pixels[:,:x,:y,:]
-        t = vae.encode(pixels[:,:,:,:3])
+            x_offset = (pixels.shape[1] % 8) // 2
+            y_offset = (pixels.shape[2] % 8) // 2
+            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
+        return pixels
+
+    def encode(self, vae, pixels):
+        pixels = self.vae_encode_crop_pixels(pixels)
+        t = vae.encode(pixels[:,:,:,:3])
         return ({"samples":t}, )

 class VAEEncodeTiled:
     def __init__(self, device="cpu"):
         self.device = device
@@ -149,46 +216,51 @@ class VAEEncodeTiled:
     CATEGORY = "_for_testing"

     def encode(self, vae, pixels):
-        x = (pixels.shape[1] // 64) * 64
-        y = (pixels.shape[2] // 64) * 64
-        if pixels.shape[1] != x or pixels.shape[2] != y:
-            pixels = pixels[:,:x,:y,:]
+        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
         t = vae.encode_tiled(pixels[:,:,:,:3])
         return ({"samples":t}, )

 class VAEEncodeForInpaint:
     def __init__(self, device="cpu"):
         self.device = device

     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
+        return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "encode"

     CATEGORY = "latent/inpaint"

-    def encode(self, vae, pixels, mask):
-        x = (pixels.shape[1] // 64) * 64
-        y = (pixels.shape[2] // 64) * 64
-        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]
+    def encode(self, vae, pixels, mask, grow_mask_by=6):
+        x = (pixels.shape[1] // 8) * 8
+        y = (pixels.shape[2] // 8) * 8
+        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

         pixels = pixels.clone()
         if pixels.shape[1] != x or pixels.shape[2] != y:
-            pixels = pixels[:,:x,:y,:]
-            mask = mask[:x,:y]
+            x_offset = (pixels.shape[1] % 8) // 2
+            y_offset = (pixels.shape[2] % 8) // 2
+            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
+            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]

         #grow mask by a few pixels to keep things seamless in latent space
-        kernel_tensor = torch.ones((1, 1, 6, 6))
-        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
-        m = (1.0 - mask.round())
+        if grow_mask_by == 0:
+            mask_erosion = mask
+        else:
+            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
+            padding = math.ceil((grow_mask_by - 1) / 2)
+            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

+        m = (1.0 - mask.round()).squeeze(1)
         for i in range(3):
             pixels[:,:,:,i] -= 0.5
             pixels[:,:,:,i] *= m
             pixels[:,:,:,i] += 0.5
         t = vae.encode(pixels)

-        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )
+        return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )

 class CheckpointLoader:
     @classmethod
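The grow_mask_by parameter generalizes the old hard-coded 6x6 kernel: the binarized mask is convolved with an all-ones kernel and clamped to [0, 1], dilating it so the inpainted region bleeds a few pixels past the mask edge and stays seamless after the VAE round trip. Its effect in isolation (sizes are illustrative; for even kernel sizes the conv output is one pixel larger per side, which the node slices back with [:,:,:x,:y]):

import math
import torch

mask = torch.zeros(1, 1, 64, 64)
mask[:, :, 24:40, 24:40] = 1.0                  # a 16x16 masked square
grow_mask_by = 6
kernel = torch.ones((1, 1, grow_mask_by, grow_mask_by))
padding = math.ceil((grow_mask_by - 1) / 2)
grown = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel, padding=padding), 0, 1)
print(int(mask.sum()), int(grown.sum()))        # the grown mask covers more pixels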
@@ -542,8 +614,8 @@ class EmptyLatentImage:
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
-                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
+        return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                               "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "generate"
@@ -581,8 +653,8 @@ class LatentUpscale:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
-                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
-                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
+                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                               "crop": (s.crop_methods,)}}
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "upscale"
@@ -684,8 +756,8 @@ class LatentCrop:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "samples": ("LATENT",),
-                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
-                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
+                              "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+                              "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                               "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                               "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                               }}
@@ -710,16 +782,6 @@ class LatentCrop:
         new_width = width // 8
         to_x = new_width + x
         to_y = new_height + y
-        def enforce_image_dim(d, to_d, max_d):
-            if to_d > max_d:
-                leftover = (to_d - max_d) % 8
-                to_d = max_d
-                d -= leftover
-            return (d, to_d)
-
-        #make sure size is always multiple of 64
-        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
-        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
         s['samples'] = samples[:,:,y:to_y, x:to_x]
         return (s,)
@@ -739,79 +801,27 @@ class SetLatentNoiseMask:
         s["noise_mask"] = mask
         return (s,)

 def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
-    latent_image = latent["samples"]
-    noise_mask = None
     device = comfy.model_management.get_torch_device()
+    latent_image = latent["samples"]

     if disable_noise:
         noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
     else:
-        batch_index = 0
-        if "batch_index" in latent:
-            batch_index = latent["batch_index"]
-
-        generator = torch.manual_seed(seed)
-        for i in range(batch_index):
-            noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
-        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
+        skip = latent["batch_index"] if "batch_index" in latent else 0
+        noise = comfy.sample.prepare_noise(latent_image, seed, skip)

+    noise_mask = None
     if "noise_mask" in latent:
-        noise_mask = latent['noise_mask']
-        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
-        noise_mask = noise_mask.round()
-        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
-        noise_mask = torch.cat([noise_mask] * noise.shape[0])
-        noise_mask = noise_mask.to(device)
-
-    real_model = None
-    comfy.model_management.load_model_gpu(model)
-    real_model = model.model
-
-    noise = noise.to(device)
-    latent_image = latent_image.to(device)
-
-    positive_copy = []
-    negative_copy = []
-
-    control_nets = []
-    def get_models(cond):
-        models = []
-        for c in cond:
-            if 'control' in c[1]:
-                models += [c[1]['control']]
-            if 'gligen' in c[1]:
-                models += [c[1]['gligen'][1]]
-        return models
-
-    for p in positive:
-        t = p[0]
-        if t.shape[0] < noise.shape[0]:
-            t = torch.cat([t] * noise.shape[0])
-        t = t.to(device)
-        positive_copy += [[t] + p[1:]]
-    for n in negative:
-        t = n[0]
-        if t.shape[0] < noise.shape[0]:
-            t = torch.cat([t] * noise.shape[0])
-        t = t.to(device)
-        negative_copy += [[t] + n[1:]]
-
-    models = get_models(positive) + get_models(negative)
-    comfy.model_management.load_controlnet_gpu(models)
-
-    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
-        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
-    else:
-        #other samplers
-        pass
-
-    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
-    samples = samples.cpu()
-    for m in models:
-        m.cleanup()
+        noise_mask = latent["noise_mask"]
+
+    pbar = comfy.utils.ProgressBar(steps)
+    def callback(step, x0, x):
+        pbar.update_absolute(step + 1)
+
+    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
+                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)

     out = latent.copy()
     out["samples"] = samples
     return (out, )
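comfy.sample.prepare_noise centralizes what the deleted loop did by hand: draw seeded noise for the latent batch, first burning `skip` single-sample draws so that entry N of a split batch receives the same noise it would have gotten in the full batch. A sketch of that idea, assuming the helper mirrors the removed code (the real implementation lives in comfy.sample):

import torch

def prepare_noise_sketch(latent_image, seed, skip=0):
    generator = torch.manual_seed(seed)
    shape = [1] + list(latent_image.size())[1:]
    for _ in range(skip):
        # Burn one draw per skipped batch entry to stay aligned with the full batch.
        torch.randn(shape, dtype=latent_image.dtype, layout=latent_image.layout,
                    generator=generator, device="cpu")
    return torch.randn(latent_image.size(), dtype=latent_image.dtype,
                       layout=latent_image.layout, generator=generator, device="cpu")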
@@ -974,8 +984,7 @@ class LoadImage:
     RETURN_TYPES = ("IMAGE", "MASK")
     FUNCTION = "load_image"
     def load_image(self, image):
-        input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image)
         i = Image.open(image_path)
         image = i.convert("RGB")
         image = np.array(image).astype(np.float32) / 255.0
@@ -989,20 +998,27 @@ class LoadImage:
     @classmethod
     def IS_CHANGED(s, image):
-        input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image)
         m = hashlib.sha256()
         with open(image_path, 'rb') as f:
             m.update(f.read())
         return m.digest().hex()

+    @classmethod
+    def VALIDATE_INPUTS(s, image):
+        if not folder_paths.exists_annotated_filepath(image):
+            return "Invalid image file: {}".format(image)
+
+        return True
+
 class LoadImageMask:
+    _color_channels = ["alpha", "red", "green", "blue"]
     @classmethod
     def INPUT_TYPES(s):
         input_dir = folder_paths.get_input_directory()
         return {"required":
                     {"image": (sorted(os.listdir(input_dir)), ),
-                     "channel": (["alpha", "red", "green", "blue"], ),}
+                     "channel": (s._color_channels, ),}
                 }

     CATEGORY = "mask"
@@ -1010,8 +1026,7 @@ class LoadImageMask:
     RETURN_TYPES = ("MASK",)
     FUNCTION = "load_image"
     def load_image(self, image, channel):
-        input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image)
         i = Image.open(image_path)
         if i.getbands() != ("R", "G", "B", "A"):
             i = i.convert("RGBA")
@@ -1028,13 +1043,22 @@ class LoadImageMask:
     @classmethod
     def IS_CHANGED(s, image, channel):
-        input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image)
         m = hashlib.sha256()
         with open(image_path, 'rb') as f:
             m.update(f.read())
         return m.digest().hex()

+    @classmethod
+    def VALIDATE_INPUTS(s, image, channel):
+        if not folder_paths.exists_annotated_filepath(image):
+            return "Invalid image file: {}".format(image)
+
+        if channel not in s._color_channels:
+            return "Invalid color channel: {}".format(channel)
+
+        return True
+
 class ImageScale:
     upscale_methods = ["nearest-exact", "bilinear", "area"]
     crop_methods = ["disabled", "center"]
@@ -1079,10 +1103,10 @@ class ImagePadForOutpaint:
         return {
             "required": {
                 "image": ("IMAGE",),
-                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
-                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
-                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
-                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
+                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                 "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
             }
         }
@@ -1154,8 +1178,10 @@ NODE_CLASS_MAPPINGS = {
     "ImageScale": ImageScale,
     "ImageInvert": ImageInvert,
     "ImagePadForOutpaint": ImagePadForOutpaint,
+    "ConditioningAverage ": ConditioningAverage ,
     "ConditioningCombine": ConditioningCombine,
     "ConditioningSetArea": ConditioningSetArea,
+    "ConditioningSetMask": ConditioningSetMask,
     "KSamplerAdvanced": KSamplerAdvanced,
     "SetLatentNoiseMask": SetLatentNoiseMask,
     "LatentComposite": LatentComposite,
@@ -1204,7 +1230,9 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "CLIPTextEncode": "CLIP Text Encode (Prompt)",
     "CLIPSetLastLayer": "CLIP Set Last Layer",
     "ConditioningCombine": "Conditioning (Combine)",
+    "ConditioningAverage ": "Conditioning (Average)",
     "ConditioningSetArea": "Conditioning (Set Area)",
+    "ConditioningSetMask": "Conditioning (Set Mask)",
     "ControlNetApply": "Apply ControlNet",
     # Latent
     "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
...
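For context, NODE_CLASS_MAPPINGS is the single registration point for nodes: the key is the node's internal type name and NODE_DISPLAY_NAME_MAPPINGS supplies the UI label, which is why the stray trailing space in "ConditioningAverage " has to be carried into both maps verbatim. A minimal hypothetical node following the same pattern (not part of this commit):

class ConditioningPassthrough:  # hypothetical example
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", )}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "passthrough"
    CATEGORY = "conditioning"

    def passthrough(self, conditioning):
        return (conditioning, )

# NODE_CLASS_MAPPINGS["ConditioningPassthrough"] = ConditioningPassthrough
# NODE_DISPLAY_NAME_MAPPINGS["ConditioningPassthrough"] = "Conditioning (Passthrough)"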
@@ -47,7 +47,7 @@
 "    !git pull\n",
 "\n",
 "!echo -= Install dependencies =-\n",
-"!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118"
+"!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117"
 ]
 },
 {
...
@@ -112,14 +112,21 @@ class PromptServer():

         @routes.post("/upload/image")
         async def upload_image(request):
-            upload_dir = folder_paths.get_input_directory()
+            post = await request.post()
+            image = post.get("image")
+
+            if post.get("type") is None:
+                upload_dir = folder_paths.get_input_directory()
+            elif post.get("type") == "input":
+                upload_dir = folder_paths.get_input_directory()
+            elif post.get("type") == "temp":
+                upload_dir = folder_paths.get_temp_directory()
+            elif post.get("type") == "output":
+                upload_dir = folder_paths.get_output_directory()

             if not os.path.exists(upload_dir):
                 os.makedirs(upload_dir)

-            post = await request.post()
-            image = post.get("image")
-
             if image and image.file:
                 filename = image.filename
                 if not filename:
...
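With the new `type` field a client can direct an upload to the input, temp, or output directory. A hypothetical client call (the endpoint and field names come from the diff; host, port, and filename are placeholders):

import requests  # any HTTP client works; requests is used here for brevity

with open("photo.png", "rb") as f:
    r = requests.post(
        "http://127.0.0.1:8188/upload/image",
        files={"image": f},
        data={"type": "temp"},  # omit to upload to the default input directory
    )
print(r.status_code)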
@@ -232,10 +232,27 @@ app.registerExtension({
 					"name": "My Color Palette",
 					"colors": {
 						"node_slot": {
+						},
+						"litegraph_base": {
+						},
+						"comfy_base": {
 						}
 					}
 				};

+				// Copy over missing keys from default color palette
+				const defaultColorPalette = colorPalettes[defaultColorPaletteId];
+				for (const key in defaultColorPalette.colors.litegraph_base) {
+					if (!colorPalette.colors.litegraph_base[key]) {
+						colorPalette.colors.litegraph_base[key] = "";
+					}
+				}
+				for (const key in defaultColorPalette.colors.comfy_base) {
+					if (!colorPalette.colors.comfy_base[key]) {
+						colorPalette.colors.comfy_base[key] = "";
+					}
+				}
+
 				return completeColorPalette(colorPalette);
 			};
...
@@ -6,6 +6,7 @@ app.registerExtension({
 	name: "Comfy.SlotDefaults",
 	suggestionsNumber: null,
 	init() {
+		LiteGraph.search_filter_enabled = true;
 		LiteGraph.middle_click_slot_add_default_node = true;
 		this.suggestionsNumber = app.ui.settings.addSetting({
 			id: "Comfy.NodeSuggestions.number",
@@ -43,6 +44,14 @@ app.registerExtension({
 				}
 				if (this.slot_types_default_out[type].includes(nodeId)) continue;
 				this.slot_types_default_out[type].push(nodeId);
+
+				// Input types have to be stored as lower case
+				// Store each node that can handle this input type
+				const lowerType = type.toLocaleLowerCase();
+				if (!(lowerType in LiteGraph.registered_slot_in_types)) {
+					LiteGraph.registered_slot_in_types[lowerType] = { nodes: [] };
+				}
+				LiteGraph.registered_slot_in_types[lowerType].nodes.push(nodeType.comfyClass);
 			}

 			var outputs = nodeData["output"];
@@ -53,6 +62,16 @@ app.registerExtension({
 				}
 				this.slot_types_default_in[type].push(nodeId);
+
+				// Store each node that can handle this output type
+				if (!(type in LiteGraph.registered_slot_out_types)) {
+					LiteGraph.registered_slot_out_types[type] = { nodes: [] };
+				}
+				LiteGraph.registered_slot_out_types[type].nodes.push(nodeType.comfyClass);
+
+				if(!LiteGraph.slot_types_out.includes(type)) {
+					LiteGraph.slot_types_out.push(type);
+				}
 			}

 			var maxNum = this.suggestionsNumber.value;
 			this.setDefaults(maxNum);
...
@@ -3628,6 +3628,18 @@
         return size;
     };

+    LGraphNode.prototype.inResizeCorner = function(canvasX, canvasY) {
+        var rows = this.outputs ? this.outputs.length : 1;
+        var outputs_offset = (this.constructor.slot_start_y || 0) + rows * LiteGraph.NODE_SLOT_HEIGHT;
+        return isInsideRectangle(canvasX,
+            canvasY,
+            this.pos[0] + this.size[0] - 15,
+            this.pos[1] + Math.max(this.size[1] - 15, outputs_offset),
+            20,
+            20
+        );
+    }
+
     /**
      * returns all the info available about a property of this node.
      *
@@ -5877,14 +5889,7 @@ LGraphNode.prototype.executeAction = function(action)
             if ( !this.connecting_node && !node.flags.collapsed && !this.live_mode ) {
                 //Search for corner for resize
                 if ( !skip_action &&
-                    node.resizable !== false &&
-                    isInsideRectangle( e.canvasX,
-                        e.canvasY,
-                        node.pos[0] + node.size[0] - 5,
-                        node.pos[1] + node.size[1] - 5,
-                        10,
-                        10
-                    )
+                    node.resizable !== false && node.inResizeCorner(e.canvasX, e.canvasY)
                 ) {
                     this.graph.beforeChange();
                     this.resizing_node = node;
@@ -6424,16 +6429,7 @@ LGraphNode.prototype.executeAction = function(action)
             //Search for corner
             if (this.canvas) {
-                if (
-                    isInsideRectangle(
-                        e.canvasX,
-                        e.canvasY,
-                        node.pos[0] + node.size[0] - 5,
-                        node.pos[1] + node.size[1] - 5,
-                        5,
-                        5
-                    )
-                ) {
+                if (node.inResizeCorner(e.canvasX, e.canvasY)) {
                     this.canvas.style.cursor = "se-resize";
                 } else {
                     this.canvas.style.cursor = "crosshair";
@@ -9953,11 +9949,11 @@ LGraphNode.prototype.executeAction = function(action)
                 }
                 break;
             case "slider":
-                var range = w.options.max - w.options.min;
+                var old_value = w.value;
                 var nvalue = Math.clamp((x - 15) / (widget_width - 30), 0, 1);
                 if(w.options.read_only) break;
                 w.value = w.options.min + (w.options.max - w.options.min) * nvalue;
-                if (w.callback) {
+                if (old_value != w.value) {
                     setTimeout(function() {
                         inner_value_change(w, w.value);
                     }, 20);
@@ -10044,7 +10040,7 @@ LGraphNode.prototype.executeAction = function(action)
                 if (event.click_time < 200 && delta == 0) {
                     this.prompt("Value",w.value,function(v) {
                         // check if v is a valid equation or a number
-                        if (/^[0-9+\-*/()\s]+$/.test(v)) {
+                        if (/^[0-9+\-*/()\s]+|\d+\.\d+$/.test(v)) {
                             try {//solve the equation if possible
                                 v = eval(v);
                             } catch (e) { }
...
@@ -20,6 +20,12 @@ export class ComfyApp {
 	 */
 	#processingQueue = false;

+	/**
+	 * Content Clipboard
+	 * @type {serialized node object}
+	 */
+	static clipspace = null;
+
 	constructor() {
 		this.ui = new ComfyUI(this);
@@ -130,6 +136,83 @@ export class ComfyApp {
 				);
 			}
 		}
+
+		options.push(
+			{
+				content: "Copy (Clipspace)",
+				callback: (obj) => {
+					var widgets = null;
+					if(this.widgets) {
+						widgets = this.widgets.map(({ type, name, value }) => ({ type, name, value }));
+					}
+
+					let img = new Image();
+					var imgs = undefined;
+					if(this.imgs != undefined) {
+						img.src = this.imgs[0].src;
+						imgs = [img];
+					}
+
+					ComfyApp.clipspace = {
+						'widgets': widgets,
+						'imgs': imgs,
+						'original_imgs': imgs,
+						'images': this.images
+					};
+				}
+			});
+
+		if(ComfyApp.clipspace != null) {
+			options.push(
+				{
+					content: "Paste (Clipspace)",
+					callback: () => {
+						if(ComfyApp.clipspace != null) {
+							if(ComfyApp.clipspace.widgets != null && this.widgets != null) {
+								ComfyApp.clipspace.widgets.forEach(({ type, name, value }) => {
+									const prop = Object.values(this.widgets).find(obj => obj.type === type && obj.name === name);
+									if (prop) {
+										prop.callback(value);
+									}
+								});
+							}
+
+							// image paste
+							if(ComfyApp.clipspace.imgs != undefined && this.imgs != undefined && this.widgets != null) {
+								var filename = "";
+
+								if(this.images && ComfyApp.clipspace.images) {
+									this.images = ComfyApp.clipspace.images;
+								}
+
+								if(ComfyApp.clipspace.images != undefined) {
+									const clip_image = ComfyApp.clipspace.images[0];
+									if(clip_image.subfolder != '')
+										filename = `${clip_image.subfolder}/`;
+									filename += `${clip_image.filename} [${clip_image.type}]`;
+								}
+								else if(ComfyApp.clipspace.widgets != undefined) {
+									const index_in_clip = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image');
+									if(index_in_clip >= 0) {
+										filename = `${ComfyApp.clipspace.widgets[index_in_clip].value}`;
+									}
+								}
+
+								const index = this.widgets.findIndex(obj => obj.name === 'image');
+								if(index >= 0 && filename != "" && ComfyApp.clipspace.imgs != undefined) {
+									this.imgs = ComfyApp.clipspace.imgs;
+
+									this.widgets[index].value = filename;
+									if(this.widgets_values != undefined) {
+										this.widgets_values[index] = filename;
+									}
+								}
+							}
+							this.trigger('changed');
+						}
+					}
+				}
+			);
+		}
 	};
 }
@@ -888,8 +971,10 @@ export class ComfyApp {
 	loadGraphData(graphData) {
 		this.clean();

+		let reset_invalid_values = false;
 		if (!graphData) {
 			graphData = structuredClone(defaultGraph);
+			reset_invalid_values = true;
 		}

 		const missingNodeTypes = [];
@@ -975,6 +1060,13 @@ export class ComfyApp {
 						}
 					}
 				}
+				if (reset_invalid_values) {
+					if (widget.type == "combo") {
+						if (!widget.options.values.includes(widget.value) && widget.options.values.length > 0) {
+							widget.value = widget.options.values[0];
+						}
+					}
+				}
 			}
 		}
...
@@ -136,9 +136,11 @@ function addMultilineWidget(node, name, opts, app) {
 			left: `${t.a * margin + t.e}px`,
 			top: `${t.d * (y + widgetHeight - margin - 3) + t.f}px`,
 			width: `${(widgetWidth - margin * 2 - 3) * t.a}px`,
+			background: (!node.color)?'':node.color,
 			height: `${(this.parent.inputHeight - margin * 2 - 4) * t.d}px`,
 			position: "absolute",
-			zIndex: 1,
+			color: (!node.color)?'':'white',
+			zIndex: app.graph._nodes.indexOf(node),
 			fontSize: `${t.d * 10.0}px`,
 		});
 		this.inputEl.hidden = !visible;
@@ -270,6 +272,9 @@ export const ComfyWidgets = {
 					app.graph.setDirtyCanvas(true);
 				};
 				img.src = `/view?filename=${name}&type=input`;
+				if ((node.size[1] - node.imageOffset) < 100) {
+					node.size[1] = 250 + node.imageOffset;
+				}
 			}

 			// Add our own callback to the combo widget to render an image when it changes
...
@@ -120,7 +120,7 @@ body {
 .comfy-menu > button,
 .comfy-menu-btns button,
 .comfy-menu .comfy-list button,
-.comfy-modal button{
+.comfy-modal button {
 	color: var(--input-text);
 	background-color: var(--comfy-input-bg);
 	border-radius: 8px;
@@ -129,6 +129,15 @@ body {
 	margin-top: 2px;
 }

+.comfy-menu > button:hover,
+.comfy-menu-btns button:hover,
+.comfy-menu .comfy-list button:hover,
+.comfy-modal button:hover,
+.comfy-settings-btn:hover {
+	filter: brightness(1.2);
+	cursor: pointer;
+}
+
 .comfy-menu span.drag-handle {
 	width: 10px;
 	height: 20px;
@@ -248,8 +257,11 @@ button.comfy-queue-btn {
 	}
 }

+/* Input popup */
+
 .graphdialog {
 	min-height: 1em;
+	background-color: var(--comfy-menu-bg);
 }

 .graphdialog .name {
@@ -273,15 +285,66 @@ button.comfy-queue-btn {
 	border-radius: 12px 0 0 12px;
 }

+/* Context menu */
+
 .litegraph .litemenu-entry.has_submenu {
 	position: relative;
 	padding-right: 20px;
 }

 .litemenu-entry.has_submenu::after {
 	content: ">";
 	position: absolute;
 	top: 0;
 	right: 2px;
 }

+.litegraph.litecontextmenu,
+.litegraph.litecontextmenu.dark {
+	z-index: 9999 !important;
+	background-color: var(--comfy-menu-bg) !important;
+	filter: brightness(95%);
+}
+
+.litegraph.litecontextmenu .litemenu-entry:hover:not(.disabled):not(.separator) {
+	background-color: var(--comfy-menu-bg) !important;
+	filter: brightness(155%);
+	color: var(--input-text);
+}
+
+.litegraph.litecontextmenu .litemenu-entry.submenu,
+.litegraph.litecontextmenu.dark .litemenu-entry.submenu {
+	background-color: var(--comfy-menu-bg) !important;
+	color: var(--input-text);
+}
+
+.litegraph.litecontextmenu input {
+	background-color: var(--comfy-input-bg) !important;
+	color: var(--input-text) !important;
+}
+
+/* Search box */
+
+.litegraph.litesearchbox {
+	z-index: 9999 !important;
+	background-color: var(--comfy-menu-bg) !important;
+	overflow: hidden;
+}
+
+.litegraph.litesearchbox input,
+.litegraph.litesearchbox select {
+	background-color: var(--comfy-input-bg) !important;
+	color: var(--input-text);
+}
+
+.litegraph.lite-search-item {
+	color: var(--input-text);
+	background-color: var(--comfy-input-bg);
+	filter: brightness(80%);
+	padding-left: 0.2em;
+}
+
+.litegraph.lite-search-item.generic_type {
+	color: var(--input-text);
+	filter: brightness(50%);
+}
...