# chenpangpang/ComfyUI · Commit 2816eb23

**Merge branch 'comfyanonymous:master' into socketrework**

Unverified commit, authored Feb 23, 2023 by pythongosssss, committed by GitHub on Feb 23, 2023.

Parents: a52aa9f4, 5796705c
Showing 9 changed files with 379 additions and 34 deletions:

| File | Additions | Deletions |
| --- | --- | --- |
| comfy/extra_samplers/uni_pc.py | +8 | −3 |
| comfy/ldm/models/diffusion/ddim.py | +86 | −13 |
| comfy/ldm/modules/attention.py | +8 | −9 |
| comfy/samplers.py | +50 | −3 |
| comfy/sd.py | +16 | −1 |
| nodes.py | +36 | −0 |
| notebooks/comfyui_colab.ipynb | +52 | −1 |
| script_examples/basic_api_example.py | +117 | −0 |
| webshit/index.html | +6 | −4 |
## comfy/extra_samplers/uni_pc.py

```diff
@@ -833,7 +833,7 @@ def expand_dims(v, dims):
 
-def sample_unipc(model, noise, image, sigmas, sampling_function, extra_args=None, callback=None, disable=None, noise_mask=None):
+def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=None, noise_mask=None, variant='bh1'):
     to_zero = False
     if sigmas[-1] == 0:
         timesteps = torch.nn.functional.interpolate(sigmas[None,None,:-1], size=(len(sigmas),), mode='linear')[0][0]
```
```diff
@@ -847,7 +847,12 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, extra_args=None
     ns = NoiseScheduleVP('discrete', alphas_cumprod=model.inner_model.alphas_cumprod)
 
     if image is not None:
-        img = image * ns.marginal_alpha(timesteps[0]) + noise * ns.marginal_std(timesteps[0])
+        img = image * ns.marginal_alpha(timesteps[0])
+        if max_denoise:
+            noise_mult = 1.0
+        else:
+            noise_mult = ns.marginal_std(timesteps[0])
+        img += noise * noise_mult
     else:
         img = noise
```
```diff
@@ -870,7 +875,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, extra_args=None
         model_kwargs=extra_args,
     )
 
-    uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise)
+    uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant)
     x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=3, lower_order_final=True)
     if not to_zero:
         x /= ns.marginal_alpha(timesteps[-1])
```
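The `max_denoise` flag threaded through here decides whether the starting latent is re-noised at full unit variance or only up to the schedule's marginal std at the first timestep. A minimal sketch of that selection, using a toy stand-in for `NoiseScheduleVP.marginal_std` (the schedule function and values below are illustrative, not the library's):

```python
import torch

def marginal_std(t):
    # Toy VP-style std curve; stands in for NoiseScheduleVP.marginal_std.
    return torch.sqrt(1.0 - torch.exp(-t))

image = torch.zeros(1, 4, 8, 8)   # latent being re-noised
noise = torch.randn_like(image)
t0 = torch.tensor(3.0)            # first timestep of the schedule

for max_denoise in (False, True):
    # Mirrors the hunk above: full-strength noise only when max_denoise is set.
    noise_mult = 1.0 if max_denoise else marginal_std(t0)
    img = image + noise * noise_mult
    print(f"max_denoise={max_denoise}: std ~ {float(img.std()):.3f}")
```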
## comfy/ldm/models/diffusion/ddim.py

```diff
@@ -22,11 +22,15 @@ class DDIMSampler(object):
             setattr(self, name, attr)
 
     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
+        ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                   num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
+        self.make_schedule_timesteps(ddim_timesteps, ddim_eta=ddim_eta, verbose=verbose)
+
+    def make_schedule_timesteps(self, ddim_timesteps, ddim_eta=0., verbose=True):
+        self.ddim_timesteps = torch.tensor(ddim_timesteps)
         alphas_cumprod = self.model.alphas_cumprod
         assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
+        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
 
         self.register_buffer('betas', to_torch(self.model.betas))
         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
```
```diff
@@ -52,6 +56,58 @@ class DDIMSampler(object):
                     1 - self.alphas_cumprod / self.alphas_cumprod_prev))
         self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
 
+    @torch.no_grad()
+    def sample_custom(self,
+                      ddim_timesteps,
+                      conditioning,
+                      callback=None,
+                      img_callback=None,
+                      quantize_x0=False,
+                      eta=0.,
+                      mask=None,
+                      x0=None,
+                      temperature=1.,
+                      noise_dropout=0.,
+                      score_corrector=None,
+                      corrector_kwargs=None,
+                      verbose=True,
+                      x_T=None,
+                      log_every_t=100,
+                      unconditional_guidance_scale=1.,
+                      unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
+                      dynamic_threshold=None,
+                      ucg_schedule=None,
+                      denoise_function=None,
+                      cond_concat=None,
+                      to_zero=True,
+                      end_step=None,
+                      **kwargs
+                      ):
+        self.make_schedule_timesteps(ddim_timesteps=ddim_timesteps, ddim_eta=eta, verbose=verbose)
+        samples, intermediates = self.ddim_sampling(conditioning, x_T.shape,
+                                                    callback=callback,
+                                                    img_callback=img_callback,
+                                                    quantize_denoised=quantize_x0,
+                                                    mask=mask, x0=x0,
+                                                    ddim_use_original_steps=False,
+                                                    noise_dropout=noise_dropout,
+                                                    temperature=temperature,
+                                                    score_corrector=score_corrector,
+                                                    corrector_kwargs=corrector_kwargs,
+                                                    x_T=x_T,
+                                                    log_every_t=log_every_t,
+                                                    unconditional_guidance_scale=unconditional_guidance_scale,
+                                                    unconditional_conditioning=unconditional_conditioning,
+                                                    dynamic_threshold=dynamic_threshold,
+                                                    ucg_schedule=ucg_schedule,
+                                                    denoise_function=denoise_function,
+                                                    cond_concat=cond_concat,
+                                                    to_zero=to_zero,
+                                                    end_step=end_step)
+        return samples, intermediates
+
     @torch.no_grad()
     def sample(self,
                S,
```
```diff
@@ -116,7 +172,9 @@ class DDIMSampler(object):
                                                     unconditional_guidance_scale=unconditional_guidance_scale,
                                                     unconditional_conditioning=unconditional_conditioning,
                                                     dynamic_threshold=dynamic_threshold,
-                                                    ucg_schedule=ucg_schedule
+                                                    ucg_schedule=ucg_schedule,
+                                                    denoise_function=None,
+                                                    cond_concat=None
                                                     )
         return samples, intermediates
```
```diff
@@ -127,7 +185,7 @@ class DDIMSampler(object):
                       mask=None, x0=None, img_callback=None, log_every_t=100,
                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                       unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
-                      ucg_schedule=None):
+                      ucg_schedule=None, denoise_function=None, cond_concat=None, to_zero=True, end_step=None):
         device = self.model.betas.device
         b = shape[0]
         if x_T is None:
```
```diff
@@ -142,11 +200,11 @@ class DDIMSampler(object):
             timesteps = self.ddim_timesteps[:subset_end]
 
         intermediates = {'x_inter': [img], 'pred_x0': [img]}
-        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
+        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else timesteps.flip(0)
         total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-        print(f"Running DDIM Sampling with {total_steps} timesteps")
-        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
+        # print(f"Running DDIM Sampling with {total_steps} timesteps")
+        iterator = tqdm(time_range[:end_step], desc='DDIM Sampler', total=end_step)
 
         for i, step in enumerate(iterator):
             index = total_steps - i - 1
```
```diff
@@ -167,7 +225,7 @@ class DDIMSampler(object):
                                       corrector_kwargs=corrector_kwargs,
                                       unconditional_guidance_scale=unconditional_guidance_scale,
                                       unconditional_conditioning=unconditional_conditioning,
-                                      dynamic_threshold=dynamic_threshold)
+                                      dynamic_threshold=dynamic_threshold, denoise_function=denoise_function, cond_concat=cond_concat)
             img, pred_x0 = outs
             if callback: callback(i)
             if img_callback: img_callback(pred_x0, i)
```
```diff
@@ -176,16 +234,27 @@ class DDIMSampler(object):
                 intermediates['x_inter'].append(img)
                 intermediates['pred_x0'].append(pred_x0)
 
+        if to_zero:
+            img = pred_x0
+        else:
+            if ddim_use_original_steps:
+                sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
+            else:
+                sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
+            img /= sqrt_alphas_cumprod[index - 1]
+
         return img, intermediates
 
     @torch.no_grad()
     def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                       unconditional_guidance_scale=1., unconditional_conditioning=None,
-                      dynamic_threshold=None):
+                      dynamic_threshold=None, denoise_function=None, cond_concat=None):
         b, *_, device = *x.shape, x.device
 
-        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
+        if denoise_function is not None:
+            model_output = denoise_function(self.model.apply_model, x, t, unconditional_conditioning, c, unconditional_guidance_scale, cond_concat)
+        elif unconditional_conditioning is None or unconditional_guidance_scale == 1.:
             model_output = self.model.apply_model(x, t, c)
         else:
             x_in = torch.cat([x] * 2)
```
```diff
@@ -299,7 +368,7 @@ class DDIMSampler(object):
         return x_next, out
 
     @torch.no_grad()
-    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
+    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None, max_denoise=False):
         # fast, but does not allow for exact reconstruction
         # t serves as an index to gather the correct alphas
         if use_original_steps:
```
```diff
@@ -311,8 +380,12 @@ class DDIMSampler(object):
 
         if noise is None:
             noise = torch.randn_like(x0)
-        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
-                extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
+        if max_denoise:
+            noise_multiplier = 1.0
+        else:
+            noise_multiplier = extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)
+
+        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + noise_multiplier * noise)
+
     @torch.no_grad()
     def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
```
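`stochastic_encode` gets the same treatment: the usual q-sample form `sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * noise` keeps its noise term unattenuated when `max_denoise` is set. A self-contained sketch with a made-up `alphas_cumprod` schedule (values for illustration only):

```python
import torch

alphas_cumprod = torch.linspace(0.999, 0.01, 1000)  # made-up schedule
x0 = torch.randn(1, 4, 8, 8)
noise = torch.randn_like(x0)
t = torch.tensor([999])                              # encode at the last step

sqrt_ac = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)
sqrt_om = (1 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)

for max_denoise in (False, True):
    # max_denoise replaces the sqrt(1 - alpha_bar) factor with 1.0.
    noise_multiplier = 1.0 if max_denoise else sqrt_om
    z_enc = sqrt_ac * x0 + noise_multiplier * noise
    print(f"max_denoise={max_denoise}: latent std ~ {float(z_enc.std()):.3f}")
```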
## comfy/ldm/modules/attention.py
```diff
@@ -343,7 +343,7 @@ class CrossAttentionDoggettx(nn.Module):
         return self.to_out(r2)
 
-class OriginalCrossAttention(nn.Module):
+class CrossAttention(nn.Module):
     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
         super().__init__()
         inner_dim = dim_head * heads
```
```diff
@@ -395,14 +395,13 @@ class OriginalCrossAttention(nn.Module):
         return self.to_out(out)
 
 import sys
-if "--use-split-cross-attention" in sys.argv:
-    print("Using split optimization for cross attention")
-    class CrossAttention(CrossAttentionDoggettx):
-        pass
-else:
-    print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
-    class CrossAttention(CrossAttentionBirchSan):
-        pass
+if XFORMERS_IS_AVAILBLE == False:
+    if "--use-split-cross-attention" in sys.argv:
+        print("Using split optimization for cross attention")
+        CrossAttention = CrossAttentionDoggettx
+    else:
+        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
+        CrossAttention = CrossAttentionBirchSan
 
 class MemoryEfficientCrossAttention(nn.Module):
     # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
```
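Swapping the empty-subclass trick for a plain name binding is a small but real improvement: the selected class keeps its own `__name__`, and identity checks hold, which matters for `isinstance` and debugging. A tiny illustration with stand-in classes:

```python
class Impl:              # stands in for CrossAttentionDoggettx
    pass

class Subclassed(Impl):  # old style: empty subclass shim
    pass

Aliased = Impl           # new style: plain assignment

print(Subclassed.__name__, Subclassed is Impl)  # Subclassed False
print(Aliased.__name__, Aliased is Impl)        # Impl True
```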
## comfy/samplers.py
```diff
@@ -4,6 +4,8 @@ from .extra_samplers import uni_pc
 import torch
 import contextlib
 import model_management
+from .ldm.models.diffusion.ddim import DDIMSampler
+from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
 
 class CFGDenoiser(torch.nn.Module):
     def __init__(self, model):
```
```diff
@@ -234,6 +236,14 @@ def simple_scheduler(model, steps):
     sigs += [0.0]
     return torch.FloatTensor(sigs)
 
+def ddim_scheduler(model, steps):
+    sigs = []
+    ddim_timesteps = make_ddim_timesteps(ddim_discr_method="uniform", num_ddim_timesteps=steps, num_ddpm_timesteps=model.inner_model.inner_model.num_timesteps, verbose=False)
+    for x in range(len(ddim_timesteps) - 1, -1, -1):
+        sigs.append(model.t_to_sigma(torch.tensor(ddim_timesteps[x])))
+    sigs += [0.0]
+    return torch.FloatTensor(sigs)
+
 def blank_inpaint_image_like(latent_image):
     blank_image = torch.ones_like(latent_image)
     # these are the values for "zero" in pixel space translated to latent space
```
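`ddim_scheduler` builds the sigma schedule by walking the uniformly spaced DDIM timesteps from high to low, converting each through the wrapped model's `t_to_sigma`, and appending a final `0.0` so the schedule ends at zero noise. A sketch of the same construction with a toy linear `t_to_sigma` (both the mapping and the stride below are simplified stand-ins for `make_ddim_timesteps`):

```python
import torch

def t_to_sigma(t):
    # Toy stand-in for the k-diffusion wrapper's t_to_sigma.
    return 0.01 + t / 100.0

num_ddpm_timesteps = 1000
steps = 5
c = num_ddpm_timesteps // steps                         # uniform stride
ddim_timesteps = list(range(0, num_ddpm_timesteps, c))  # [0, 200, 400, 600, 800]

sigs = [float(t_to_sigma(t)) for t in reversed(ddim_timesteps)]  # high noise -> low
sigs.append(0.0)                                        # terminate at zero noise
print(torch.FloatTensor(sigs))  # tensor([8.0100, 6.0100, 4.0100, 2.0100, 0.0100, 0.0000])
```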
```diff
@@ -310,10 +320,10 @@ def apply_control_net_to_equal_area(conds, uncond):
             uncond[temp[1]] = [o[0], n]
 
 class KSampler:
-    SCHEDULERS = ["karras", "normal", "simple"]
+    SCHEDULERS = ["karras", "normal", "simple", "ddim_uniform"]
     SAMPLERS = ["sample_euler", "sample_euler_ancestral", "sample_heun", "sample_dpm_2", "sample_dpm_2_ancestral",
                 "sample_lms", "sample_dpm_fast", "sample_dpm_adaptive", "sample_dpmpp_2s_ancestral", "sample_dpmpp_sde",
-                "sample_dpmpp_2m", "uni_pc"]
+                "sample_dpmpp_2m", "ddim", "uni_pc", "uni_pc_bh2"]
 
     def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None):
         self.model = model
```
```diff
@@ -334,6 +344,7 @@ class KSampler:
         self.sigma_min = float(self.model_wrap.sigma_min)
         self.sigma_max = float(self.model_wrap.sigma_max)
         self.set_steps(steps, denoise)
+        self.denoise = denoise
 
     def _calculate_sigmas(self, steps):
         sigmas = None
```
```diff
@@ -349,6 +360,8 @@ class KSampler:
             sigmas = self.model_wrap.get_sigmas(steps).to(self.device)
         elif self.scheduler == "simple":
             sigmas = simple_scheduler(self.model_wrap, steps).to(self.device)
+        elif self.scheduler == "ddim_uniform":
+            sigmas = ddim_scheduler(self.model_wrap, steps).to(self.device)
         else:
             print("error invalid scheduler", self.scheduler)
```
```diff
@@ -402,6 +415,7 @@ class KSampler:
         extra_args = {"cond": positive, "uncond": negative, "cond_scale": cfg}
 
+        cond_concat = None
         if hasattr(self.model, 'concat_keys'):
             cond_concat = []
             for ck in self.model.concat_keys:
```
```diff
@@ -417,9 +431,42 @@ class KSampler:
                     cond_concat.append(blank_inpaint_image_like(noise))
             extra_args["cond_concat"] = cond_concat
 
+        if sigmas[0] != self.sigmas[0] or (self.denoise is not None and self.denoise < 1.0):
+            max_denoise = False
+        else:
+            max_denoise = True
+
         with precision_scope(self.device):
             if self.sampler == "uni_pc":
-                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, extra_args=extra_args, noise_mask=denoise_mask)
+                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask)
+            elif self.sampler == "uni_pc_bh2":
+                samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, variant='bh2')
+            elif self.sampler == "ddim":
+                timesteps = []
+                for s in range(sigmas.shape[0]):
+                    timesteps.insert(0, self.model_wrap.sigma_to_t(sigmas[s]))
+
+                noise_mask = None
+                if denoise_mask is not None:
+                    noise_mask = 1.0 - denoise_mask
+
+                sampler = DDIMSampler(self.model)
+                sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False)
+                z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise)
+                samples, _ = sampler.sample_custom(ddim_timesteps=timesteps,
+                                                    conditioning=positive,
+                                                    batch_size=noise.shape[0],
+                                                    shape=noise.shape[1:],
+                                                    verbose=False,
+                                                    unconditional_guidance_scale=cfg,
+                                                    unconditional_conditioning=negative,
+                                                    eta=0.0,
+                                                    x_T=z_enc,
+                                                    x0=latent_image,
+                                                    denoise_function=sampling_function,
+                                                    cond_concat=cond_concat,
+                                                    mask=noise_mask,
+                                                    to_zero=sigmas[-1]==0,
+                                                    end_step=sigmas.shape[0] - 1)
+
             else:
                 extra_args["denoise_mask"] = denoise_mask
                 self.model_k.latent_image = latent_image
```
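With these branches in place, "ddim" and "uni_pc_bh2" are selectable like any other entry in `SAMPLERS`, and "ddim_uniform" is the natural scheduler to pair with "ddim". A hedged construction sketch (assumes a loaded ComfyUI model; only the `KSampler.__init__` arguments shown in this diff are used, and the commented `sample` call is a guess at the surrounding API rather than a documented signature):

```python
# Illustrative only; requires a loaded model and prepared latents/conditioning.
import comfy.samplers

ksampler = comfy.samplers.KSampler(model, steps=20, device="cuda",
                                   sampler="ddim", scheduler="ddim_uniform",
                                   denoise=1.0)
# denoise=1.0 leaves max_denoise True, so DDIM starts from pure noise;
# denoise < 1.0 re-noises latent_image only partially (img2img behavior).
# samples = ksampler.sample(noise, positive, negative, cfg=8.0,
#                           latent_image=latent, denoise_mask=None)
```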
## comfy/sd.py
```diff
@@ -400,7 +400,7 @@ class ControlNet:
         out.append(self.control_model)
         return out
 
-def load_controlnet(ckpt_path):
+def load_controlnet(ckpt_path, model=None):
     controlnet_data = load_torch_file(ckpt_path)
     pth_key = 'control_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'
     pth = False
```
```diff
@@ -437,6 +437,21 @@ def load_controlnet(ckpt_path):
                               use_fp16=use_fp16)
 
     if pth:
+        if 'difference' in controlnet_data:
+            if model is not None:
+                m = model.patch_model()
+                model_sd = m.state_dict()
+                for x in controlnet_data:
+                    c_m = "control_model."
+                    if x.startswith(c_m):
+                        sd_key = "model.diffusion_model.{}".format(x[len(c_m):])
+                        if sd_key in model_sd:
+                            cd = controlnet_data[x]
+                            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
+                model.unpatch_model()
+            else:
+                print("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")
+
         class WeightsLoader(torch.nn.Module):
             pass
         w = WeightsLoader()
```
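A "difference" controlnet stores only weight deltas; loading it adds the base model's matching `model.diffusion_model.*` weights back onto each `control_model.*` tensor. The core of that merge, reduced to plain dicts of tensors (the keys and values below are hypothetical, for illustration):

```python
import torch

# Hypothetical state dicts: the controlnet holds deltas, the model holds bases.
controlnet_data = {"control_model.input_blocks.0.weight": torch.full((2, 2), 0.1)}
model_sd = {"model.diffusion_model.input_blocks.0.weight": torch.ones(2, 2)}

c_m = "control_model."
for x in controlnet_data:
    if x.startswith(c_m):
        sd_key = "model.diffusion_model.{}".format(x[len(c_m):])
        if sd_key in model_sd:
            cd = controlnet_data[x]
            # In-place add of the base weight, cast/moved to match the delta.
            cd += model_sd[sd_key].type(cd.dtype).to(cd.device)

print(controlnet_data["control_model.input_blocks.0.weight"])  # 1.1 everywhere
```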
## nodes.py
```diff
@@ -232,6 +232,24 @@ class ControlNetLoader:
         controlnet = comfy.sd.load_controlnet(controlnet_path)
         return (controlnet,)
 
+class DiffControlNetLoader:
+    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
+    controlnet_dir = os.path.join(models_dir, "controlnet")
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model": ("MODEL",),
+                              "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir), supported_pt_extensions), )}}
+
+    RETURN_TYPES = ("CONTROL_NET",)
+    FUNCTION = "load_controlnet"
+
+    CATEGORY = "loaders"
+
+    def load_controlnet(self, model, control_net_name):
+        controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
+        controlnet = comfy.sd.load_controlnet(controlnet_path, model)
+        return (controlnet,)
+
 class ControlNetApply:
     @classmethod
```
```diff
@@ -733,6 +751,22 @@ class ImageScale:
         s = s.movedim(1,-1)
         return (s,)
 
+class ImageInvert:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image": ("IMAGE",)}}
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "invert"
+
+    CATEGORY = "image"
+
+    def invert(self, image):
+        s = 1.0 - image
+        return (s,)
+
+
 NODE_CLASS_MAPPINGS = {
     "KSampler": KSampler,
     "CheckpointLoader": CheckpointLoader,
```
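`ImageInvert` relies on ComfyUI image tensors being floats in [0, 1], so `1.0 - image` flips every channel. A quick check of the node's core outside the UI:

```python
import torch

image = torch.tensor([[[[0.0, 0.25], [0.75, 1.0]]]])  # float values in [0, 1]
inverted = 1.0 - image                                 # what ImageInvert.invert does
print(inverted)  # tensor([[[[1.0000, 0.7500], [0.2500, 0.0000]]]])
```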
```diff
@@ -747,6 +781,7 @@ NODE_CLASS_MAPPINGS = {
     "LoadImage": LoadImage,
     "LoadImageMask": LoadImageMask,
     "ImageScale": ImageScale,
+    "ImageInvert": ImageInvert,
     "ConditioningCombine": ConditioningCombine,
     "ConditioningSetArea": ConditioningSetArea,
     "KSamplerAdvanced": KSamplerAdvanced,
```
```diff
@@ -759,6 +794,7 @@ NODE_CLASS_MAPPINGS = {
     "CLIPLoader": CLIPLoader,
     "ControlNetApply": ControlNetApply,
     "ControlNetLoader": ControlNetLoader,
+    "DiffControlNetLoader": DiffControlNetLoader,
 }
 
 CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
```
## notebooks/comfyui_colab.ipynb
```diff
@@ -85,7 +85,12 @@
     {
       "cell_type": "markdown",
       "source": [
-        "Run ComfyUI (use the fp16 model configs for more speed):"
+        "### Run ComfyUI \n",
+        "use the **fp16** model configs for more speed\n",
+        "\n",
+        "You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n",
+        "\n",
+        "If you want to open it in another window use the second link not the first one.\n"
       ],
       "metadata": {
         "id": "gggggggggg"
```
```diff
@@ -119,6 +124,52 @@
       },
       "execution_count": null,
       "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "### Run ComfyUI with localtunnel\n",
+        "\n",
+        "If you have issues with the previous way, you can try this way. It will also work on non colab.\n",
+        "\n",
+        "use the **fp16** model configs for more speed\n",
+        "\n"
+      ],
+      "metadata": {
+        "id": "kkkkkkkkkkkkkk"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "!npm install -g localtunnel\n",
+        "\n",
+        "import subprocess\n",
+        "import threading\n",
+        "import time\n",
+        "import socket\n",
+        "def iframe_thread(port):\n",
+        "  while True:\n",
+        "    time.sleep(0.5)\n",
+        "    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+        "    result = sock.connect_ex(('127.0.0.1', port))\n",
+        "    if result == 0:\n",
+        "      break\n",
+        "    sock.close()\n",
+        "  p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
+        "  for line in p.stdout:\n",
+        "    print(line.decode(), end='')\n",
+        "\n",
+        "\n",
+        "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+        "\n",
+        "!python main.py --highvram"
+      ],
+      "metadata": {
+        "id": "jjjjjjjjjjjjj"
+      },
+      "execution_count": null,
+      "outputs": []
     }
   ]
 }
\ No newline at end of file
```
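The new cell waits for the server port to accept connections before launching localtunnel. That polling pattern, pulled out as a standalone sketch (the `wait_for_port` helper is hypothetical, not part of the notebook):

```python
import socket
import time

def wait_for_port(host, port, interval=0.5):
    # Poll until a TCP connect succeeds; connect_ex returns 0 on success
    # instead of raising, which keeps the loop simple.
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            if sock.connect_ex((host, port)) == 0:
                return
        time.sleep(interval)

# wait_for_port("127.0.0.1", 8188)  # returns once ComfyUI is listening
```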
## script_examples/basic_api_example.py (new file, mode 100644)
```python
import json
from urllib import request, parse
import random

#this is the ComfyUI api prompt format. If you want it for a specific workflow you can copy it from the prompt section
#of the image metadata of images generated with ComfyUI
#keep in mind ComfyUI is pre alpha software so this format will change a bit.

#this is the one for the default workflow
prompt_text = """
{
"3": {
"class_type": "KSampler",
"inputs": {
"cfg": 8,
"denoise": 1,
"latent_image": [
"5",
0
],
"model": [
"4",
0
],
"negative": [
"7",
0
],
"positive": [
"6",
0
],
"sampler_name": "sample_euler",
"scheduler": "normal",
"seed": 8566257,
"steps": 20
}
},
"4": {
"class_type": "CheckpointLoader",
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.ckpt",
"config_name": "v1-inference.yaml"
}
},
"5": {
"class_type": "EmptyLatentImage",
"inputs": {
"batch_size": 1,
"height": 512,
"width": 512
}
},
"6": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "masterpiece best quality girl"
}
},
"7": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "bad hands"
}
},
"8": {
"class_type": "VAEDecode",
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
}
},
"9": {
"class_type": "SaveImage",
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
}
}
}
"""
def
queue_prompt
(
prompt
):
p
=
{
"prompt"
:
prompt
}
data
=
json
.
dumps
(
p
).
encode
(
'utf-8'
)
req
=
request
.
Request
(
"http://127.0.0.1:8188/prompt"
,
data
=
data
)
request
.
urlopen
(
req
)
prompt
=
json
.
loads
(
prompt_text
)
#set the text prompt for our positive CLIPTextEncode
prompt
[
"6"
][
"inputs"
][
"text"
]
=
"masterpiece best quality man"
#set the seed for our KSampler node
prompt
[
"3"
][
"inputs"
][
"seed"
]
=
5
queue_prompt
(
prompt
)
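The script imports `random` but never uses it; an obvious extension is randomizing the KSampler seed before queueing. A small sketch that could be appended to the script above (node ids "3" and "6" come from the default workflow JSON):

```python
# Queue a few variations of the same workflow with fresh random seeds.
for _ in range(3):
    prompt["3"]["inputs"]["seed"] = random.randint(0, 2**32 - 1)
    queue_prompt(prompt)
```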
## webshit/index.html
```diff
@@ -406,12 +406,14 @@ function graphToPrompt() {
 					}
 
 					for (let y in n.widgets) {
 						if (!Object.hasOwn(n.widgets[y], 'to_randomize')) { //don't include "Random seed after every gen" in prompt.
-							input_[n.widgets[y].name] = n.widgets[y].value;
+							if (n.widgets[y].dynamic_prompt && n.widgets[y].dynamic_prompt === true) {
+								input_[n.widgets[y].name] = n.widgets[y].value.replace("\\{", "{").replace("\\}", "}");
+							} else {
+								input_[n.widgets[y].name] = n.widgets[y].value;
+							}
 						}
 					}
 
 					for (let y in n.inputs) {
 						let parent_node = n.getInputNode(y);
 						if (parent_node) {
```
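The new branch unescapes `\{` and `\}` in widget values flagged with `dynamic_prompt`, so escaped braces reach the queued prompt as literal braces. The same transformation in Python, for illustration (the flag and sample value are hypothetical):

```python
def unescape_braces(value: str) -> str:
    # Mirror of the JS replace("\\{", "{").replace("\\}", "}") above.
    # Caveat: JS String.replace with a string pattern swaps only the first
    # occurrence, while Python's str.replace swaps all of them.
    return value.replace("\\{", "{").replace("\\}", "}")

print(unescape_braces(r"a photo of a \{cat|dog\}"))  # a photo of a {cat|dog}
```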