"git@developer.sourcefind.cn:dadigang/Ventoy.git" did not exist on "537f0eaa7e16df83a9e826cda9aa7967bc77717d"
Commit 220afe33 authored by comfyanonymous

Initial commit.

import sd1_clip
import open_clip
import torch
from torch.utils.checkpoint import checkpoint  # used by text_transformer_forward when grad checkpointing is enabled
class SD2ClipModel(torch.nn.Module, sd1_clip.ClipTokenWeightEncoder):
"""
Uses the OpenCLIP transformer encoder for text
"""
LAYERS = [
#"pooled",
"last",
"penultimate",
"hidden"
]
#version="laion2b_s32b_b79k"
def __init__(self, arch="ViT-H-14", device="cpu", max_length=77,
freeze=True, layer="penultimate", layer_idx=None):
super().__init__()
assert layer in self.LAYERS
model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'))
del model.visual
self.model = model
self.device = device
self.max_length = max_length
self.empty_tokens = [[49406] + [49407] + [0] * 75]
if freeze:
self.freeze()
self.layer = layer
if self.layer == "last":
self.layer_idx = 0
elif self.layer == "penultimate":
self.layer_idx = 1
elif self.layer == "hidden":
assert layer_idx is not None
assert abs(layer_idx) < 24
self.clip_layer(layer_idx)
else:
raise NotImplementedError()
def freeze(self):
self.model = self.model.eval()
for param in self.parameters():
param.requires_grad = False
def clip_layer(self, layer_idx):
#layer_idx should have the same logic as the one for SD1
if abs(layer_idx) >= 24:
self.layer_idx = 0
else:
if layer_idx < 0:
self.layer_idx = -(layer_idx + 1)
else:
self.layer_idx = 24 - (layer_idx + 1)
def forward(self, tokens):
tokens = torch.LongTensor(tokens).to(self.device)
z = self.encode_with_transformer(tokens)
return z
def encode_with_transformer(self, tokens):
x = self.model.token_embedding(tokens) # [batch_size, n_ctx, d_model]
x = x + self.model.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.model.ln_final(x)
return x
def text_transformer_forward(self, x: torch.Tensor, attn_mask = None):
for i, r in enumerate(self.model.transformer.resblocks):
if i == len(self.model.transformer.resblocks) - self.layer_idx:
break
if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(r, x, attn_mask)
else:
x = r(x, attn_mask=attn_mask)
return x
def encode(self, tokens):
return self(tokens)
class SD2Tokenizer(sd1_clip.SD1Tokenizer):
def __init__(self, tokenizer_path=None):
super().__init__(tokenizer_path, pad_with_end=False)
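A minimal sketch (not part of the file above) of how clip_layer() maps an SD1-style layer_idx onto the number of final transformer blocks that text_transformer_forward() skips, assuming the 24-block ViT-H text tower:

# Sketch only: mirrors SD2ClipModel.clip_layer() for a 24-block text tower.
def mapped_layer_idx(layer_idx, num_blocks=24):
    if abs(layer_idx) >= num_blocks:
        return 0  # out of range: fall back to the last layer
    if layer_idx < 0:
        return -(layer_idx + 1)
    return num_blocks - (layer_idx + 1)

assert mapped_layer_idx(-1) == 0   # "last": skip nothing
assert mapped_layer_idx(-2) == 1   # "penultimate": skip the final block
assert mapped_layer_idx(0) == 23   # first hidden layer: keep only one block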
import os
import sys
import copy
import json
import threading
import queue
import traceback
import torch
import nodes
def recursive_execute(prompt, outputs, current_item, extra_data={}):
unique_id = current_item
inputs = prompt[unique_id]['inputs']
class_type = prompt[unique_id]['class_type']
c_obj = nodes.NODE_CLASS_MAPPINGS[class_type]
valid_inputs = c_obj.INPUT_TYPES()
if unique_id in outputs:
return []
executed = []
for x in inputs:
input_data = inputs[x]
if isinstance(input_data, list):
input_unique_id = input_data[0]
output_index = input_data[1]
if input_unique_id not in outputs:
executed += recursive_execute(prompt, outputs, input_unique_id, extra_data)
input_data_all = {}
for x in inputs:
input_data = inputs[x]
if isinstance(input_data, list):
input_unique_id = input_data[0]
output_index = input_data[1]
obj = outputs[input_unique_id][output_index]
input_data_all[x] = obj
else:
if ("required" in valid_inputs and x in valid_inputs["required"]) or ("optional" in valid_inputs and x in valid_inputs["optional"]):
input_data_all[x] = input_data
obj = c_obj()
if "hidden" in valid_inputs:
h = valid_inputs["hidden"]
for x in h:
if h[x] == "PROMPT":
input_data_all[x] = prompt
if h[x] == "EXTRA_PNGINFO":
if "extra_pnginfo" in extra_data:
input_data_all[x] = extra_data['extra_pnginfo']
outputs[unique_id] = getattr(obj, obj.FUNCTION)(**input_data_all)
return executed + [unique_id]
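# Illustrative only (not from the repo): the flattened graph format that
# recursive_execute() walks. Keys are node ids; scalar inputs are literals and
# links are [source_node_id, output_index] pairs into that node's RETURN_TYPES.
#
#   {"1": {"class_type": "CheckpointLoader",
#          "inputs": {"config_name": "v1-inference.yaml",
#                     "ckpt_name": "v1-5-pruned-emaonly.ckpt"}},
#    "2": {"class_type": "CLIPTextEncode",
#          "inputs": {"text": "a photo of a cat", "clip": ["1", 1]}}}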
def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item):
unique_id = current_item
inputs = prompt[unique_id]['inputs']
class_type = prompt[unique_id]['class_type']
if unique_id not in outputs:
return True
to_delete = False
if unique_id not in old_prompt:
to_delete = True
elif inputs == old_prompt[unique_id]['inputs']:
for x in inputs:
input_data = inputs[x]
if isinstance(input_data, list):
input_unique_id = input_data[0]
output_index = input_data[1]
if input_unique_id in outputs:
to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id)
else:
to_delete = True
if to_delete:
break
else:
to_delete = True
if to_delete:
print("deleted", unique_id)
d = outputs.pop(unique_id)
del d
return to_delete
class PromptExecutor:
def __init__(self):
self.outputs = {}
self.old_prompt = {}
def execute(self, prompt, extra_data={}):
with torch.no_grad():
for x in prompt:
recursive_output_delete_if_changed(prompt, self.old_prompt, self.outputs, x)
current_outputs = set(self.outputs.keys())
executed = []
try:
for x in prompt:
class_ = nodes.NODE_CLASS_MAPPINGS[prompt[x]['class_type']]
if hasattr(class_, 'OUTPUT_NODE'):
if class_.OUTPUT_NODE == True:
valid = False
try:
m = validate_inputs(prompt, x)
valid = m[0]
except:
valid = False
if valid:
executed += recursive_execute(prompt, self.outputs, x, extra_data)
except Exception as e:
print(traceback.format_exc())
to_delete = []
for o in self.outputs:
if o not in current_outputs:
to_delete += [o]
if o in self.old_prompt:
d = self.old_prompt.pop(o)
del d
for o in to_delete:
d = self.outputs.pop(o)
del d
else:
executed = set(executed)
for x in executed:
self.old_prompt[x] = copy.deepcopy(prompt[x])
def validate_inputs(prompt, item):
unique_id = item
inputs = prompt[unique_id]['inputs']
class_type = prompt[unique_id]['class_type']
obj_class = nodes.NODE_CLASS_MAPPINGS[class_type]
class_inputs = obj_class.INPUT_TYPES()
required_inputs = class_inputs['required']
for x in required_inputs:
if x not in inputs:
return (False, "Required input is missing. {}, {}".format(class_type, x))
val = inputs[x]
info = required_inputs[x]
type_input = info[0]
if isinstance(val, list):
if len(val) != 2:
return (False, "Bad Input. {}, {}".format(class_type, x))
o_id = val[0]
o_class_type = prompt[o_id]['class_type']
r = nodes.NODE_CLASS_MAPPINGS[o_class_type].RETURN_TYPES
if r[val[1]] != type_input:
return (False, "Return type mismatch. {}, {}".format(class_type, x))
r = validate_inputs(prompt, o_id)
if r[0] == False:
return r
else:
if type_input == "INT":
val = int(val)
inputs[x] = val
if type_input == "FLOAT":
val = float(val)
inputs[x] = val
if type_input == "STRING":
val = str(val)
inputs[x] = val
if len(info) > 1:
if "min" in info[1] and val < info[1]["min"]:
return (False, "Value smaller than min. {}, {}".format(class_type, x))
if "max" in info[1] and val > info[1]["max"]:
return (False, "Value bigger than max. {}, {}".format(class_type, x))
if isinstance(type_input, list):
if val not in type_input:
return (False, "Value not in list. {}, {}".format(class_type, x))
return (True, "")
def validate_prompt(prompt):
outputs = set()
for x in prompt:
class_ = nodes.NODE_CLASS_MAPPINGS[prompt[x]['class_type']]
if hasattr(class_, 'OUTPUT_NODE') and class_.OUTPUT_NODE == True:
outputs.add(x)
if len(outputs) == 0:
return (False, "Prompt has no outputs")
good_outputs = set()
for o in outputs:
valid = False
reason = ""
try:
m = validate_inputs(prompt, o)
valid = m[0]
reason = m[1]
except:
valid = False
reason = "Parsing error"
if valid == True:
            good_outputs.add(o)
else:
print("Failed to validate prompt for output {} {}".format(o, reason))
print("output will be ignored")
if len(good_outputs) == 0:
return (False, "Prompt has no properly connected outputs")
return (True, "")
def prompt_worker(q):
e = PromptExecutor()
while True:
item = q.get()
e.execute(item[-2], item[-1])
q.task_done()
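# Note: queue items are (number, id(prompt), prompt, extra_data) tuples, so the
# PriorityQueue orders pending prompts by their "number" field (id() only
# breaks ties); a client can jump the queue by POSTing a smaller number.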
from http.server import BaseHTTPRequestHandler, HTTPServer
class PromptServer(BaseHTTPRequestHandler):
def _set_headers(self, code=200, ct='text/html'):
self.send_response(code)
self.send_header('Content-type', ct)
self.end_headers()
def log_message(self, format, *args):
pass
def do_GET(self):
if self.path == "/prompt":
self._set_headers(ct='application/json')
prompt_info = {}
exec_info = {}
exec_info['queue_remaining'] = self.server.prompt_queue.unfinished_tasks
prompt_info['exec_info'] = exec_info
self.wfile.write(json.dumps(prompt_info).encode('utf-8'))
elif self.path == "/object_info":
self._set_headers(ct='application/json')
out = {}
for x in nodes.NODE_CLASS_MAPPINGS:
obj_class = nodes.NODE_CLASS_MAPPINGS[x]
info = {}
info['input'] = obj_class.INPUT_TYPES()
info['output'] = obj_class.RETURN_TYPES
info['name'] = x #TODO
info['description'] = ''
out[x] = info
self.wfile.write(json.dumps(out).encode('utf-8'))
elif self.path[1:] in os.listdir(self.server.server_dir):
self._set_headers()
with open(os.path.join(self.server.server_dir, self.path[1:]), "rb") as f:
self.wfile.write(f.read())
else:
self._set_headers()
with open(os.path.join(self.server.server_dir, "index.html"), "rb") as f:
self.wfile.write(f.read())
def do_HEAD(self):
self._set_headers()
def do_POST(self):
resp_code = 200
out_string = ""
if self.path == "/prompt":
print("got prompt")
self.data_string = self.rfile.read(int(self.headers['Content-Length']))
json_data = json.loads(self.data_string)
if "number" in json_data:
number = float(json_data['number'])
else:
number = self.server.number
self.server.number += 1
if "prompt" in json_data:
prompt = json_data["prompt"]
valid = validate_prompt(prompt)
extra_data = {}
if "extra_data" in json_data:
extra_data = json_data["extra_data"]
if valid[0]:
self.server.prompt_queue.put((number, id(prompt), prompt, extra_data))
else:
resp_code = 400
out_string = valid[1]
print("invalid prompt:", valid[1])
        self._set_headers(code=resp_code)
        self.wfile.write(out_string.encode('utf8'))
return
def run(prompt_queue, address='', port=8188):
server_address = (address, port)
httpd = HTTPServer(server_address, PromptServer)
httpd.server_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "webshit")
httpd.prompt_queue = prompt_queue
httpd.number = 0
if server_address[0] == '':
addr = '0.0.0.0'
else:
addr = server_address[0]
print("Starting server\n")
print("To see the GUI go to: http://{}:{}".format(addr, server_address[1]))
httpd.serve_forever()
if __name__ == "__main__":
q = queue.PriorityQueue()
threading.Thread(target=prompt_worker, daemon=True, args=(q,)).start()
run(q, address='127.0.0.1', port=8188)
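A minimal client sketch for the /prompt endpoint above, using only the standard library; the payload format matches what the web UI's graphToPrompt() produces, and the graph must end in an output node such as SaveImage to pass validate_prompt():

import json
import urllib.request
import urllib.error

# Sketch only: assumes the server above is running locally on port 8188.
prompt = {}  # fill in with a flattened node graph ending in a SaveImage node
req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",
    data=json.dumps({"prompt": prompt}).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
try:
    with urllib.request.urlopen(req) as resp:
        print(resp.status)  # 200: the prompt validated and was queued
except urllib.error.HTTPError as e:
    print(e.read().decode())  # 400: the server echoes the validation error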
model:
base_learning_rate: 1.0e-04
target: ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false # Note: different from the one we trained before
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 10000 ]
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
f_start: [ 1.e-6 ]
f_max: [ 1. ]
f_min: [ 1. ]
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_heads: 8
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
params:
layer: "hidden"
layer_idx: -2
model:
base_learning_rate: 1.0e-04
target: ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false # Note: different from the one we trained before
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 10000 ]
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
f_start: [ 1.e-6 ]
f_max: [ 1. ]
f_min: [ 1. ]
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_heads: 8
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
model:
base_learning_rate: 1.0e-04
target: ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false # Note: different from the one we trained before
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 10000 ]
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
f_start: [ 1.e-6 ]
f_max: [ 1. ]
f_min: [ 1. ]
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_heads: 8
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
params:
layer: "hidden"
layer_idx: -2
model:
base_learning_rate: 1.0e-4
target: ldm.models.diffusion.ddpm.LatentDiffusion
params:
parameterization: "v"
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False # we set this to false because this is an inference only config
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
use_checkpoint: True
use_fp16: True
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_head_channels: 64 # need to fix for flash-attn
use_spatial_transformer: True
use_linear_in_transformer: True
transformer_depth: 1
context_dim: 1024
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
#attn_type: "vanilla-xformers"
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
params:
freeze: True
layer: "penultimate"
model:
base_learning_rate: 1.0e-4
target: ldm.models.diffusion.ddpm.LatentDiffusion
params:
parameterization: "v"
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False # we set this to false because this is an inference only config
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
use_checkpoint: True
use_fp16: False
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_head_channels: 64 # need to fix for flash-attn
use_spatial_transformer: True
use_linear_in_transformer: True
transformer_depth: 1
context_dim: 1024
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
#attn_type: "vanilla-xformers"
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
params:
freeze: True
layer: "penultimate"
import torch
import os
import sys
import json
from PIL import Image
from PIL.PngImagePlugin import PngInfo
import numpy as np
sys.path.append(os.path.join(sys.path[0], "comfy"))
import comfy.samplers
import comfy.sd
supported_ckpt_extensions = ['.ckpt']
try:
import safetensors.torch
supported_ckpt_extensions += ['.safetensors']
except:
print("Could not import safetensors, safetensors support disabled.")
def filter_files_extensions(files, extensions):
return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))
class CLIPTextEncode:
@classmethod
def INPUT_TYPES(s):
return {"required": {"text": ("STRING", ), "clip": ("CLIP", )}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"
def encode(self, clip, text):
return (clip.encode(text), )
class VAEDecode:
def __init__(self, device="cpu"):
self.device = device
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "decode"
def decode(self, vae, samples):
return (vae.decode(samples), )
class VAEEncode:
def __init__(self, device="cpu"):
self.device = device
@classmethod
def INPUT_TYPES(s):
return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "encode"
def encode(self, vae, pixels):
return (vae.encode(pixels), )
class CheckpointLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
config_dir = os.path.join(models_dir, "configs")
ckpt_dir = os.path.join(models_dir, "checkpoints")
@classmethod
def INPUT_TYPES(s):
return {"required": { "config_name": (filter_files_extensions(os.listdir(s.config_dir), '.yaml'), ),
"ckpt_name": (filter_files_extensions(os.listdir(s.ckpt_dir), supported_ckpt_extensions), )}}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
FUNCTION = "load_checkpoint"
def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
config_path = os.path.join(self.config_dir, config_name)
ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=output_vae, output_clip=output_clip)
class VAELoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
vae_dir = os.path.join(models_dir, "vae")
@classmethod
def INPUT_TYPES(s):
return {"required": { "vae_name": (filter_files_extensions(os.listdir(s.vae_dir), supported_ckpt_extensions), )}}
RETURN_TYPES = ("VAE",)
FUNCTION = "load_vae"
#TODO: scale factor?
def load_vae(self, vae_name):
vae_path = os.path.join(self.vae_dir, vae_name)
vae = comfy.sd.VAE(ckpt_path=vae_path)
return (vae,)
class EmptyLatentImage:
def __init__(self, device="cpu"):
self.device = device
@classmethod
def INPUT_TYPES(s):
return {"required": { "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
"height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "generate"
def generate(self, width, height, batch_size=1):
latent = torch.zeros([batch_size, 4, height // 8, width // 8])
return (latent, )
class LatentUpscale:
upscale_methods = ["nearest-exact", "bilinear", "area"]
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
"width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
"height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "upscale"
def upscale(self, samples, upscale_method, width, height):
s = torch.nn.functional.interpolate(samples, size=(height // 8, width // 8), mode=upscale_method)
return (s,)
class KSampler:
def __init__(self, device="cuda"):
self.device = device
@classmethod
def INPUT_TYPES(s):
return {"required":
{"model": ("MODEL",),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"latent_image": ("LATENT", ),
"denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "sample"
def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")
model = model.to(self.device)
noise = noise.to(self.device)
latent_image = latent_image.to(self.device)
if positive.shape[0] < noise.shape[0]:
positive = torch.cat([positive] * noise.shape[0])
if negative.shape[0] < noise.shape[0]:
negative = torch.cat([negative] * noise.shape[0])
positive = positive.to(self.device)
negative = negative.to(self.device)
        if sampler_name in comfy.samplers.KSampler.SAMPLERS:
            sampler = comfy.samplers.KSampler(model, steps=steps, device=self.device, sampler=sampler_name, scheduler=scheduler, denoise=denoise)
        else:
            #other samplers
            raise ValueError("unknown sampler: {}".format(sampler_name))
samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image)
samples = samples.cpu()
model = model.cpu()
return (samples, )
class SaveImage:
def __init__(self):
self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
try:
self.counter = int(max(filter(lambda a: 'ComfyUI_' in a, os.listdir(self.output_dir))).split('_')[1]) + 1
except:
self.counter = 1
@classmethod
def INPUT_TYPES(s):
return {"required":
{"images": ("IMAGE", )},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ()
FUNCTION = "save_images"
OUTPUT_NODE = True
def save_images(self, images, prompt=None, extra_pnginfo=None):
for image in images:
i = 255. * image.cpu().numpy()
img = Image.fromarray(i.astype(np.uint8))
metadata = PngInfo()
if prompt is not None:
metadata.add_text("prompt", json.dumps(prompt))
if extra_pnginfo is not None:
for x in extra_pnginfo:
metadata.add_text(x, json.dumps(extra_pnginfo[x]))
img.save(f"output/ComfyUI_{self.counter:05}_.png", pnginfo=metadata, optimize=True)
self.counter += 1
NODE_CLASS_MAPPINGS = {
"KSampler": KSampler,
"CheckpointLoader": CheckpointLoader,
"CLIPTextEncode": CLIPTextEncode,
"VAEDecode": VAEDecode,
"VAEEncode": VAEEncode,
"VAELoader": VAELoader,
"EmptyLatentImage": EmptyLatentImage,
"LatentUpscale": LatentUpscale,
"SaveImage": SaveImage,
}
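Adding an entry to NODE_CLASS_MAPPINGS is the only registration step a node needs. As a hypothetical illustration (not part of this commit), an extra node following the same contract (an INPUT_TYPES classmethod plus RETURN_TYPES and FUNCTION attributes) could look like:

# Hypothetical example node; registering it would just mean adding
# "LatentFlip": LatentFlip to NODE_CLASS_MAPPINGS above.
class LatentFlip:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", )}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"
    def flip(self, samples):
        # mirror the latent batch along its width axis
        return (torch.flip(samples, dims=[-1]), )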
<html>
<head>
<link rel="stylesheet" type="text/css" href="litegraph.css">
<script type="text/javascript" src="litegraph.core.js"></script>
</head>
<body style='width:100%; height:100%'>
<canvas id='mycanvas' width='1000' height='1000' style='width: 100%; height: 100%;'></canvas>
<script>
var graph = new LGraph();
var canvas = new LGraphCanvas("#mycanvas", graph);
const ccc = document.getElementById("mycanvas");
const ctx = ccc.getContext("2d");
// Resize the canvas to match the size of the canvas element
function resizeCanvas() {
ccc.width = ccc.offsetWidth;
ccc.height = ccc.offsetHeight;
canvas.draw(true, true);
}
// call the function when the page loads
resizeCanvas();
// call the function when the window is resized
window.addEventListener("resize", resizeCanvas);
// var node_const = LiteGraph.createNode("basic/const");
// node_const.pos = [200,200];
// graph.add(node_const);
// node_const.setValue(4.5);
//
// var node_watch = LiteGraph.createNode("basic/watch");
// node_watch.pos = [700,200];
// graph.add(node_watch);
//
// node_const.connect(0, node_watch, 0 );
//node constructor class
// function MyAddNode()
// {
// this.addInput("A","number");
// this.addInput("B","number");
// this.addInput("XXX","TESTESTE");
// this.addInput("NNN","TESTES");
// this.addOutput("A+B","number");
// this.addOutput("TTT","TESTESTE");
// this.properties = { precision: 1 };
// }
//name to show
// MyAddNode.title = "Sum";
//function to call when the node is executed
// MyAddNode.prototype.onExecute = function()
// {
// var A = this.getInputData(0);
// if( A === undefined )
// A = 0;
// var B = this.getInputData(1);
// if( B === undefined )
// B = 0;
// this.setOutputData( 0, A + B );
// }
var default_graph = {"last_node_id":9,"last_link_id":9,"nodes":[{"id":7,"type":"CLIPTextEncode","pos":[413,389],"size":{"0":425.27801513671875,"1":180.6060791015625},"flags":{},"order":3,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":5}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[6],"slot_index":0}],"properties":{},"widgets_values":["bad hands"]},{"id":6,"type":"CLIPTextEncode","pos":[415,186],"size":{"0":422.84503173828125,"1":164.31304931640625},"flags":{},"order":2,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":3}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[4],"slot_index":0}],"properties":{},"widgets_values":["masterpiece best quality girl"]},{"id":5,"type":"EmptyLatentImage","pos":[473,609],"size":{"0":315,"1":106},"flags":{},"order":1,"mode":0,"outputs":[{"name":"LATENT","type":"LATENT","links":[2],"slot_index":0}],"properties":{},"widgets_values":[512,512,1]},{"id":3,"type":"KSampler","pos":[863,186],"size":{"0":315,"1":262},"flags":{},"order":4,"mode":0,"inputs":[{"name":"model","type":"MODEL","link":1},{"name":"positive","type":"CONDITIONING","link":4},{"name":"negative","type":"CONDITIONING","link":6},{"name":"latent_image","type":"LATENT","link":2}],"outputs":[{"name":"LATENT","type":"LATENT","links":[7],"slot_index":0}],"properties":{},"widgets_values":[8566257,true,20,8,"sample_euler","normal",1]},{"id":8,"type":"VAEDecode","pos":[1209,188],"size":{"0":210,"1":46},"flags":{},"order":5,"mode":0,"inputs":[{"name":"samples","type":"LATENT","link":7},{"name":"vae","type":"VAE","link":8}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[9],"slot_index":0}],"properties":{}},{"id":9,"type":"SaveImage","pos":[1451,189],"size":{"0":210,"1":26},"flags":{},"order":6,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":9}],"properties":{}},{"id":4,"type":"CheckpointLoader","pos":[26,474],"size":{"0":315,"1":122},"flags":{},"order":0,"mode":0,"outputs":[{"name":"MODEL","type":"MODEL","links":[1],"slot_index":0},{"name":"CLIP","type":"CLIP","links":[3,5],"slot_index":1},{"name":"VAE","type":"VAE","links":[8],"slot_index":2}],"properties":{},"widgets_values":["v1-inference.yaml","v1-5-pruned-emaonly.ckpt"]}],"links":[[1,4,0,3,0,"MODEL"],[2,5,0,3,3,"LATENT"],[3,4,1,6,0,"CLIP"],[4,6,0,3,1,"CONDITIONING"],[5,4,1,7,0,"CLIP"],[6,7,0,3,2,"CONDITIONING"],[7,3,0,8,0,"LATENT"],[8,4,2,8,1,"VAE"],[9,8,0,9,0,"IMAGE"]],"groups":[],"config":{},"extra":{},"version":0.4}
function afterLoadGraph()
{
let workflow = null;
try {
        workflow = JSON.parse(localStorage.getItem("workflow"));
        if (workflow) graph.configure(workflow);
} catch(err) {
}
if (!workflow) {
graph.configure(default_graph);
}
function saveGraph() {
localStorage.setItem("workflow", JSON.stringify(graph.serialize()));
}
setInterval(saveGraph, 1000);
}
function onObjectInfo(json) {
for (let key in json) {
function MyNode()
{
            let j = MyNode.__json_data;
            let inp = j['input']['required'];
this.class_comfy = MyNode.class_type_comfy;
            this._widgets = [];
            let min_height = 1;
            let min_width = 1;
for (let x in inp) {
                let default_val, min_val, max_val, step_val;
if (inp[x].length > 1) {
default_val = inp[x][1]['default'];
min_val = inp[x][1]['min'];
max_val = inp[x][1]['max'];
step_val = inp[x][1]['step'];
}
let type = inp[x][0];
if (Array.isArray(type)) {
w = this.addWidget("combo", x, type[0], function(v){}, { values: type } );
this._widgets += [w]
} else if (type == "INT") {
if (default_val == undefined) default_val = 0;
if (min_val == undefined) min_val = 0;
if (max_val == undefined) max_val = 2048;
if (step_val == undefined) step_val = 1;
w = this.addWidget("number", x, default_val, function(v){let s = this.options.step / 10;this.value = Math.round( v / s ) * s;}, { min: min_val, max: max_val, step: 10.0 * step_val} );
this._widgets += [w]
if (x == "seed") {
w1 = this.addWidget("toggle", "Random seed after every gen", true, function(v){}, { on: "enabled", off:"disabled"} );
w1.to_randomize = w;
this._widgets += [w1]
}
} else if (type == "FLOAT") {
if (default_val == undefined) default_val = 0;
if (min_val == undefined) min_val = 0;
if (max_val == undefined) max_val = 2048;
if (step_val == undefined) step_val = 0.5;
// if (min_val == 0.0 && max_val == 1.0) {
// w = this.slider = this.addWidget("slider", x, default_val, function(v){}, { min: min_val, max: max_val} );
// } else {
w = this.addWidget("number", x, default_val, function(v){}, { min: min_val, max: max_val, step: 10.0 * step_val} );
// }
this._widgets += [w]
} else if (type == "STRING") {
var w = {
type: "customtext",
name: x,
get value() { return this.input_div.innerText;},
set value(x) { this.input_div.innerText = x;},
callback: function(v){console.log(v);},
options: {},
draw: function(ctx, node, widget_width, y, H){
var show_text = canvas.ds.scale > 0.5;
// this.input_div.style.top = `${y}px`;
let t = ctx.getTransform();
let margin = 15;
let x_div = t.a * margin * 2 + t.e;
let y_div = t.d * (y + H) + t.f;
let width_div = (widget_width - margin * 2) * t.a;
let height_div = (this.parent.size[1] - (y + H))* t.d;
this.input_div.style.left = `${x_div}px`;
this.input_div.style.top = `${y_div}px`;
                            this.input_div.style.width = `${width_div}px`;
                            this.input_div.style.height = `${height_div}px`;
this.input_div.style.position = 'absolute';
this.input_div.style.zIndex = 1;
                            this.input_div.style.fontSize = `${t.d * 10.0}px`;
if (show_text) {
this.input_div.hidden = false;
} else {
this.input_div.hidden = true;
}
ctx.save();
// ctx.fillText(String(this.value).substr(0,30), 0, y + H * 0.7);
ctx.restore();
},
};
w.input_div = document.createElement('div');
w.input_div.contentEditable = true;
w.input_div.style.backgroundColor = "#FFFFFF";
w.input_div.style.overflow = 'hidden';
document.addEventListener('click', function(event) {
if (!w.input_div.contains(event.target)) {
w.input_div.blur();
}
});
w.parent = this;
min_height = Math.max(min_height, 200);
min_width = Math.max(min_width, 400);
ccc.parentNode.appendChild(w.input_div);
w = this.addCustomWidget(w);
// w = this.addWidget("text", x, "", function(v){}, { multiline:true } );
console.log(w, this);
                    this._widgets.push(w);
this.onRemoved = function() {
for (let y in this.widgets) {
if (this.widgets[y].input_div) {
this.widgets[y].input_div.remove();
}
}
}
} else {
this.addInput(x, type);
}
}
            let out = j['output'];
for (let x in out) {
this.addOutput(out[x], out[x]);
}
            let s = this.computeSize();
s[0] *= 1.5;
s[0] = Math.max(min_width, s[0]);
s[1] = Math.max(min_height, s[1]);
this.size = s;
this.serialize_widgets = true;
}
MyNode.title = json[key]['name'];
MyNode.class_type_comfy = json[key]['name'];
MyNode.__json_data = json[key]
LiteGraph.registerNodeType(key, MyNode);
MyNode.category = "sd"; //TODO: proper categories
};
afterLoadGraph();
// graph.configure(JSON.parse(base_txt2img_graph));
}
fetch("object_info", {cache: "no-store"})
.then(response => response.json())
.then(json => onObjectInfo(json));
//register in the system
graph.start();
// LiteGraph.registerNodeType("testing", MyAddNode);
graph.onNodeRemoved = function(n) {
for (let y in n.widgets) {
if (n.widgets[y].input_div) {
n.widgets[y].input_div.remove();
}
}
}
function graphToPrompt() {
let s = graph.serialize();
let output = {};
// console.log(s['nodes']);
    let nodes = s['nodes'];
for (let x in nodes) {
let n = graph.getNodeById(nodes[x].id);
let input_ = {};
for (let y in n.widgets) {
input_[n.widgets[y].name] = n.widgets[y].value;
}
for (let y in n.inputs) {
let parent_node = n.getInputNode(y);
if (parent_node) {
for (let z in parent_node.outputs) {
let c_nodes = parent_node.getOutputNodes(z);
// console.log(c_nodes, z);
if (c_nodes) {
for (let zz in c_nodes) {
if (c_nodes[zz].id == n.id) {
input_[n.inputs[y].name] = [String(parent_node.id), parseInt(z)];
break;
}
}
}
}
}
}
let node = {}
node['inputs'] = input_;
node['class_type'] = n.class_comfy;
// inputs = x['inputs']
// inputs['name'], inputs['id']
// console.log(x, n);
// console.log(node);
output[String(n.id)] = node;
}
return output;
}
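// Illustrative shape of graphToPrompt()'s output, which is what the server's
// recursive_execute() walks: widget values become literals and links become
// [source_node_id, output_slot] pairs, e.g.
//   { "3": { "class_type": "KSampler",
//            "inputs": { "seed": 8566257, "steps": 20,
//                        "model": ["4", 0], "positive": ["6", 0] } } }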
function promptPosted(data)
{
if (data.status == 400) {
data.text().then(dt => alert(dt));
return;
}
let s = graph.serialize();
let output = {};
// console.log(s['nodes']);
    let nodes = s['nodes'];
for (let x in nodes) {
let n = graph.getNodeById(nodes[x].id);
for (let w in n.widgets) {
let wid = n.widgets[w];
if (Object.hasOwn(wid, 'to_randomize')) {
if (wid.value) {
wid.to_randomize.value = Math.floor(Math.random() * 1125899906842624);
}
}
}
}
canvas.draw(true, true);
}
function postPrompt() {
let prompt = graphToPrompt();
let full_data = {prompt: prompt, extra_data: {extra_pnginfo: {workflow: graph.serialize()}}};
fetch('/prompt', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(full_data)
})
.then(data => promptPosted(data))
.catch(error => console.error(error))
// console.log(JSON.stringify(prompt));
// console.log(JSON.stringify(graph.serialize()));
}
function promptToGraph(prompt) {
for (let x in prompt) {
}
}
function prompt_file_load(file)
{
if (file.type === 'image/png') {
const reader = new FileReader();
reader.onload = (event) => {
// Get the PNG data as a Uint8Array
const pngData = new Uint8Array(event.target.result);
const dataView = new DataView(pngData.buffer);
// Check that the PNG signature is present
if (dataView.getUint32(0) !== 0x89504e47) {
console.error('Not a valid PNG file');
return;
}
// Start searching for chunks after the PNG signature
let offset = 8;
let txt_chunks = {}
// Loop through the chunks in the PNG file
while (offset < pngData.length) {
// Get the length of the chunk
const length = dataView.getUint32(offset);
// Get the chunk type
const type = String.fromCharCode(...pngData.slice(offset + 4, offset + 8));
if (type === 'tEXt') {
// Get the keyword
let keyword_end = offset + 8;
while (pngData[keyword_end] !== 0) {
keyword_end++;
}
const keyword = String.fromCharCode(...pngData.slice(offset + 8, keyword_end));
// Get the text
const text = String.fromCharCode(...pngData.slice(keyword_end + 1, offset + 8 + length));
txt_chunks[keyword] = text;
}
// Get the next chunk
offset += 12 + length;
}
console.log(txt_chunks);
console.log(JSON.parse(txt_chunks["prompt"]));
graph.configure(JSON.parse(txt_chunks["workflow"]));
};
reader.readAsArrayBuffer(file);
} else if (file.type === "application/json" || file.name.endsWith(".json")) {
var reader = new FileReader();
reader.onload = function() {
console.log(reader.result);
var jsonData = JSON.parse(reader.result);
graph.configure(jsonData);
};
reader.readAsText(file);
}
}
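// PNG layout note for the parser above: every chunk is a 4-byte big-endian
// length, a 4-byte type, the data, then a 4-byte CRC, hence the
// `offset += 12 + length` stride; tEXt data is a keyword, a NUL byte, then the
// text, which here carries the JSON-serialized "prompt" and "workflow".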
// Get prompt from dropped PNG or json
document.addEventListener('drop', (event) => {
event.preventDefault();
event.stopPropagation();
const file = event.dataTransfer.files[0];
console.log(file.type);
prompt_file_load(file);
});
setInterval(function(){
fetch('/prompt')
.then(response => response.json())
.then(data => {
document.getElementById("queuesize").innerHTML = "Queue size: " + data.exec_info.queue_remaining + "";
    }).catch((error) => {document.getElementById("queuesize").innerHTML = "Queue size: ERR"});
}, 500);
function clearGraph() {
graph.clear();
}
function loadTxt2Img() {
graph.configure(default_graph);
}
function saveGraph() {
var json = JSON.stringify(graph.serialize()); // convert the data to a JSON string
var blob = new Blob([json], {type: "application/json"});
var url = URL.createObjectURL(blob);
var a = document.createElement("a");
a.style = "display: none";
a.href = url;
a.download = "workflow.json";
document.body.appendChild(a);
a.click();
setTimeout(function() {
document.body.removeChild(a);
window.URL.revokeObjectURL(url);
}, 0);
}
var input = document.createElement("input");
input.setAttribute("type", "file");
input.setAttribute("accept", ".json,image/png");
input.style.display = "none";
document.body.appendChild(input);
input.addEventListener('change', function() {
var file = input.files[0];
prompt_file_load(file);
});
function loadGraph() {
input.click();
}
</script>
<span style="font-size: 15px;position: absolute; top: 50%; right: 0%; background-color: white; text-align: center;">
<span id="queuesize">Queue size: X</span><br>
<button style="font-size: 20px;" id="queuebutton" onclick="postPrompt()">Queue Prompt</button><br>
<br>
<br>
<button style="font-size: 20px;" onclick="saveGraph()">Save</button><br>
<button style="font-size: 20px;" onclick="loadGraph()">Load</button><br>
<button style="font-size: 20px;" onclick="clearGraph()">Clear</button><br>
<button style="font-size: 20px;" onclick="loadTxt2Img()">Load Default</button><br>
</span>
</body>
</html>
/* this CSS contains only the basic CSS needed to run the app and use it */
.lgraphcanvas {
/*cursor: crosshair;*/
user-select: none;
-moz-user-select: none;
-webkit-user-select: none;
outline: none;
font-family: Tahoma, sans-serif;
}
.lgraphcanvas * {
box-sizing: border-box;
}
.litegraph.litecontextmenu {
font-family: Tahoma, sans-serif;
position: fixed;
top: 100px;
left: 100px;
min-width: 100px;
color: #aaf;
padding: 0;
box-shadow: 0 0 10px black !important;
background-color: #2e2e2e !important;
z-index: 10;
}
.litegraph.litecontextmenu.dark {
background-color: #000 !important;
}
.litegraph.litecontextmenu .litemenu-title img {
margin-top: 2px;
margin-left: 2px;
margin-right: 4px;
}
.litegraph.litecontextmenu .litemenu-entry {
margin: 2px;
padding: 2px;
}
.litegraph.litecontextmenu .litemenu-entry.submenu {
background-color: #2e2e2e !important;
}
.litegraph.litecontextmenu.dark .litemenu-entry.submenu {
background-color: #000 !important;
}
.litegraph .litemenubar ul {
font-family: Tahoma, sans-serif;
margin: 0;
padding: 0;
}
.litegraph .litemenubar li {
font-size: 14px;
color: #999;
display: inline-block;
min-width: 50px;
padding-left: 10px;
padding-right: 10px;
user-select: none;
-moz-user-select: none;
-webkit-user-select: none;
cursor: pointer;
}
.litegraph .litemenubar li:hover {
background-color: #777;
color: #eee;
}
.litegraph .litegraph .litemenubar-panel {
position: absolute;
top: 5px;
left: 5px;
min-width: 100px;
background-color: #444;
box-shadow: 0 0 3px black;
padding: 4px;
border-bottom: 2px solid #aaf;
z-index: 10;
}
.litegraph .litemenu-entry,
.litemenu-title {
font-size: 12px;
color: #aaa;
padding: 0 0 0 4px;
margin: 2px;
padding-left: 2px;
-moz-user-select: none;
-webkit-user-select: none;
user-select: none;
cursor: pointer;
}
.litegraph .litemenu-entry .icon {
display: inline-block;
width: 12px;
height: 12px;
margin: 2px;
vertical-align: top;
}
.litegraph .litemenu-entry.checked .icon {
background-color: #aaf;
}
.litegraph .litemenu-entry .more {
float: right;
padding-right: 5px;
}
.litegraph .litemenu-entry.disabled {
opacity: 0.5;
cursor: default;
}
.litegraph .litemenu-entry.separator {
display: block;
border-top: 1px solid #333;
border-bottom: 1px solid #666;
width: 100%;
height: 0px;
margin: 3px 0 2px 0;
background-color: transparent;
padding: 0 !important;
cursor: default !important;
}
.litegraph .litemenu-entry.has_submenu {
border-right: 2px solid cyan;
}
.litegraph .litemenu-title {
color: #dde;
background-color: #111;
margin: 0;
padding: 2px;
cursor: default;
}
.litegraph .litemenu-entry:hover:not(.disabled):not(.separator) {
background-color: #444 !important;
color: #eee;
transition: all 0.2s;
}
.litegraph .litemenu-entry .property_name {
display: inline-block;
text-align: left;
min-width: 80px;
min-height: 1.2em;
}
.litegraph .litemenu-entry .property_value {
display: inline-block;
background-color: rgba(0, 0, 0, 0.5);
text-align: right;
min-width: 80px;
min-height: 1.2em;
vertical-align: middle;
padding-right: 10px;
}
.litegraph.litesearchbox {
font-family: Tahoma, sans-serif;
position: absolute;
background-color: rgba(0, 0, 0, 0.5);
padding-top: 4px;
}
.litegraph.litesearchbox input,
.litegraph.litesearchbox select {
margin-top: 3px;
min-width: 60px;
min-height: 1.5em;
background-color: black;
border: 0;
color: white;
padding-left: 10px;
margin-right: 5px;
}
.litegraph.litesearchbox .name {
display: inline-block;
min-width: 60px;
min-height: 1.5em;
padding-left: 10px;
}
.litegraph.litesearchbox .helper {
overflow: auto;
max-height: 200px;
margin-top: 2px;
}
.litegraph.lite-search-item {
font-family: Tahoma, sans-serif;
background-color: rgba(0, 0, 0, 0.5);
color: white;
padding-top: 2px;
}
.litegraph.lite-search-item.not_in_filter{
/*background-color: rgba(50, 50, 50, 0.5);*/
/*color: #999;*/
color: #B99;
font-style: italic;
}
.litegraph.lite-search-item.generic_type{
/*background-color: rgba(50, 50, 50, 0.5);*/
/*color: #DD9;*/
color: #999;
font-style: italic;
}
.litegraph.lite-search-item:hover,
.litegraph.lite-search-item.selected {
cursor: pointer;
background-color: white;
color: black;
}
/* DIALOGs ******/
.litegraph .dialog {
position: absolute;
top: 50%;
left: 50%;
margin-top: -150px;
margin-left: -200px;
background-color: #2A2A2A;
min-width: 400px;
min-height: 200px;
box-shadow: 0 0 4px #111;
border-radius: 6px;
}
.litegraph .dialog.settings {
left: 10px;
top: 10px;
height: calc( 100% - 20px );
margin: auto;
max-width: 50%;
}
.litegraph .dialog.centered {
top: 50px;
left: 50%;
position: absolute;
transform: translateX(-50%);
min-width: 600px;
min-height: 300px;
height: calc( 100% - 100px );
margin: auto;
}
.litegraph .dialog .close {
float: right;
margin: 4px;
margin-right: 10px;
cursor: pointer;
font-size: 1.4em;
}
.litegraph .dialog .close:hover {
color: white;
}
.litegraph .dialog .dialog-header {
color: #AAA;
border-bottom: 1px solid #161616;
}
.litegraph .dialog .dialog-header { height: 40px; }
.litegraph .dialog .dialog-footer { height: 50px; padding: 10px; border-top: 1px solid #1a1a1a;}
.litegraph .dialog .dialog-header .dialog-title {
font: 20px "Arial";
margin: 4px;
padding: 4px 10px;
display: inline-block;
}
.litegraph .dialog .dialog-content, .litegraph .dialog .dialog-alt-content {
height: calc(100% - 90px);
width: 100%;
min-height: 100px;
display: inline-block;
color: #AAA;
/*background-color: black;*/
overflow: auto;
}
.litegraph .dialog .dialog-content h3 {
margin: 10px;
}
.litegraph .dialog .dialog-content .connections {
flex-direction: row;
}
.litegraph .dialog .dialog-content .connections .connections_side {
width: calc(50% - 5px);
min-height: 100px;
background-color: black;
display: flex;
}
.litegraph .dialog .node_type {
font-size: 1.2em;
display: block;
margin: 10px;
}
.litegraph .dialog .node_desc {
opacity: 0.5;
display: block;
margin: 10px;
}
.litegraph .dialog .separator {
display: block;
width: calc( 100% - 4px );
height: 1px;
border-top: 1px solid #000;
border-bottom: 1px solid #333;
margin: 10px 2px;
padding: 0;
}
.litegraph .dialog .property {
margin-bottom: 2px;
padding: 4px;
}
.litegraph .dialog .property:hover {
background: #545454;
}
.litegraph .dialog .property_name {
color: #737373;
display: inline-block;
text-align: left;
vertical-align: top;
width: 160px;
padding-left: 4px;
overflow: hidden;
margin-right: 6px;
}
.litegraph .dialog .property:hover .property_name {
color: white;
}
.litegraph .dialog .property_value {
display: inline-block;
text-align: right;
color: #AAA;
background-color: #1A1A1A;
/*width: calc( 100% - 122px );*/
max-width: calc( 100% - 162px );
min-width: 200px;
max-height: 300px;
min-height: 20px;
padding: 4px;
padding-right: 12px;
overflow: hidden;
cursor: pointer;
border-radius: 3px;
}
.litegraph .dialog .property_value:hover {
color: white;
}
.litegraph .dialog .property.boolean .property_value {
padding-right: 30px;
color: #A88;
/*width: auto;
float: right;*/
}
.litegraph .dialog .property.boolean.bool-on .property_name{
color: #8A8;
}
.litegraph .dialog .property.boolean.bool-on .property_value{
color: #8A8;
}
.litegraph .dialog .btn {
border: 0;
border-radius: 4px;
padding: 4px 20px;
margin-left: 0px;
background-color: #060606;
color: #8e8e8e;
}
.litegraph .dialog .btn:hover {
background-color: #111;
color: #FFF;
}
.litegraph .dialog .btn.delete:hover {
background-color: #F33;
color: black;
}
.litegraph .subgraph_property {
padding: 4px;
}
.litegraph .subgraph_property:hover {
background-color: #333;
}
.litegraph .subgraph_property.extra {
margin-top: 8px;
}
.litegraph .subgraph_property span.name {
font-size: 1.3em;
padding-left: 4px;
}
.litegraph .subgraph_property span.type {
opacity: 0.5;
margin-right: 20px;
padding-left: 4px;
}
.litegraph .subgraph_property span.label {
display: inline-block;
width: 60px;
padding: 0px 10px;
}
.litegraph .subgraph_property input {
width: 140px;
color: #999;
background-color: #1A1A1A;
border-radius: 4px;
border: 0;
margin-right: 10px;
padding: 4px;
padding-left: 10px;
}
.litegraph .subgraph_property button {
background-color: #1c1c1c;
color: #aaa;
border: 0;
border-radius: 2px;
padding: 4px 10px;
cursor: pointer;
}
.litegraph .subgraph_property.extra {
color: #ccc;
}
.litegraph .subgraph_property.extra input {
background-color: #111;
}
.litegraph .bullet_icon {
margin-left: 10px;
border-radius: 10px;
width: 12px;
height: 12px;
background-color: #666;
display: inline-block;
margin-top: 2px;
margin-right: 4px;
transition: background-color 0.1s ease 0s;
-moz-transition: background-color 0.1s ease 0s;
}
.litegraph .bullet_icon:hover {
background-color: #698;
cursor: pointer;
}
/* OLD */
.graphcontextmenu {
padding: 4px;
min-width: 100px;
}
.graphcontextmenu-title {
color: #dde;
background-color: #222;
margin: 0;
padding: 2px;
cursor: default;
}
.graphmenu-entry {
box-sizing: border-box;
margin: 2px;
padding-left: 20px;
user-select: none;
-moz-user-select: none;
-webkit-user-select: none;
transition: all linear 0.3s;
}
.graphmenu-entry.event,
.litemenu-entry.event {
border-left: 8px solid orange;
padding-left: 12px;
}
.graphmenu-entry.disabled {
opacity: 0.3;
}
.graphmenu-entry.submenu {
border-right: 2px solid #eee;
}
.graphmenu-entry:hover {
background-color: #555;
}
.graphmenu-entry.separator {
background-color: #111;
border-bottom: 1px solid #666;
height: 1px;
width: calc(100% - 20px);
-moz-width: calc(100% - 20px);
-webkit-width: calc(100% - 20px);
}
.graphmenu-entry .property_name {
display: inline-block;
text-align: left;
min-width: 80px;
min-height: 1.2em;
}
.graphmenu-entry .property_value,
.litemenu-entry .property_value {
display: inline-block;
background-color: rgba(0, 0, 0, 0.5);
text-align: right;
min-width: 80px;
min-height: 1.2em;
vertical-align: middle;
padding-right: 10px;
}
.graphdialog {
position: absolute;
top: 10px;
left: 10px;
min-height: 2em;
background-color: #333;
font-size: 1.2em;
box-shadow: 0 0 10px black !important;
z-index: 10;
}
.graphdialog.rounded {
border-radius: 12px;
padding-right: 2px;
}
.graphdialog .name {
display: inline-block;
min-width: 60px;
min-height: 1.5em;
padding-left: 10px;
}
.graphdialog input,
.graphdialog textarea,
.graphdialog select {
margin: 3px;
min-width: 60px;
min-height: 1.5em;
background-color: black;
border: 0;
color: white;
padding-left: 10px;
outline: none;
}
.graphdialog textarea {
min-height: 150px;
}
.graphdialog button {
margin-top: 3px;
vertical-align: top;
background-color: #999;
border: 0;
}
.graphdialog button.rounded,
.graphdialog input.rounded {
border-radius: 0 12px 12px 0;
}
.graphdialog .helper {
overflow: auto;
max-height: 200px;
}
.graphdialog .help-item {
padding-left: 10px;
}
.graphdialog .help-item:hover,
.graphdialog .help-item.selected {
cursor: pointer;
background-color: white;
color: black;
}
.litegraph .dialog {
min-height: 0;
}
.litegraph .dialog .dialog-content {
display: block;
}
.litegraph .dialog .dialog-content .subgraph_property {
padding: 5px;
}
.litegraph .dialog .dialog-footer {
margin: 0;
}
.litegraph .dialog .dialog-footer .subgraph_property {
margin-top: 0;
display: flex;
align-items: center;
padding: 5px;
}
.litegraph .dialog .dialog-footer .subgraph_property .name {
flex: 1;
}
.litegraph .graphdialog {
display: flex;
align-items: center;
border-radius: 20px;
padding: 4px 10px;
position: fixed;
}
.litegraph .graphdialog .name {
padding: 0;
min-height: 0;
font-size: 16px;
vertical-align: middle;
}
.litegraph .graphdialog .value {
font-size: 16px;
min-height: 0;
margin: 0 10px;
padding: 2px 5px;
}
.litegraph .graphdialog input[type="checkbox"] {
width: 16px;
height: 16px;
}
.litegraph .graphdialog button {
padding: 4px 18px;
border-radius: 20px;
cursor: pointer;
}