chenpangpang/ComfyUI, commit a126e2c1 (unverified)
Authored Apr 04, 2023 by missionfloyd; committed by GitHub on Apr 04, 2023

Merge branch 'master' into confirm-clear

Parents: 1b556ea9, 255dac25
Showing 5 changed files with 257 additions and 7 deletions.
comfy/ldm/modules/diffusionmodules/model.py (+2, -2)
comfy/model_management.py (+14, -0)
comfy_extras/nodes_post_processing.py (+210, -0)
nodes.py (+6, -5)
web/style.css (+25, -0)
comfy/ldm/modules/diffusionmodules/model.py
@@ -9,7 +9,7 @@ from typing import Optional, Any
 from ldm.modules.attention import MemoryEfficientCrossAttention
 import model_management

-if model_management.xformers_enabled():
+if model_management.xformers_enabled_vae():
     import xformers
     import xformers.ops

@@ -364,7 +364,7 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
 def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
-    if model_management.xformers_enabled() and attn_type == "vanilla":
+    if model_management.xformers_enabled_vae() and attn_type == "vanilla":
         attn_type = "vanilla-xformers"
     if model_management.pytorch_attention_enabled() and attn_type == "vanilla":
         attn_type = "vanilla-pytorch"
comfy/model_management.py
@@ -199,11 +199,25 @@ def get_autocast_device(dev):
         return dev.type
     return "cuda"

 def xformers_enabled():
     if vram_state == CPU:
         return False
     return XFORMERS_IS_AVAILBLE

+
+def xformers_enabled_vae():
+    enabled = xformers_enabled()
+    if not enabled:
+        return False
+
+    try:
+        #0.0.18 has a bug where Nan is returned when inputs are too big (1152x1920 res images and above)
+        if xformers.version.__version__ == "0.0.18":
+            return False
+    except:
+        pass
+
+    return enabled

 def pytorch_attention_enabled():
     return ENABLE_PYTORCH_ATTENTION
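For context, a minimal sketch (not part of this commit) of how the new xformers_enabled_vae() guard feeds the backend selection that make_attn() in model.py performs above. pick_vae_attention is a hypothetical helper, and the snippet assumes the comfy/ directory is on the import path so model_management resolves to the module changed in this diff.

# Hypothetical helper (not in the commit) mirroring the selection cascade
# that make_attn() applies with the VAE-specific xformers guard.
import model_management  # the module modified in this commit

def pick_vae_attention(attn_type="vanilla"):
    # xformers is used for the VAE only when available, not running on CPU,
    # and not the 0.0.18 release with the large-input NaN bug.
    if model_management.xformers_enabled_vae() and attn_type == "vanilla":
        return "vanilla-xformers"
    # Otherwise prefer PyTorch's built-in attention when it is enabled.
    if model_management.pytorch_attention_enabled() and attn_type == "vanilla":
        return "vanilla-pytorch"
    return attn_type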
comfy_extras/nodes_post_processing.py (new file, mode 100644)
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

import comfy.utils


class Blend:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image1": ("IMAGE",),
                "image2": ("IMAGE",),
                "blend_factor": ("FLOAT", {
                    "default": 0.5,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01
                }),
                "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blend_images"

    CATEGORY = "image/postprocessing"

    def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
        if image1.shape != image2.shape:
            image2 = image2.permute(0, 3, 1, 2)
            image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
            image2 = image2.permute(0, 2, 3, 1)

        blended_image = self.blend_mode(image1, image2, blend_mode)
        blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
        blended_image = torch.clamp(blended_image, 0, 1)
        return (blended_image,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        elif mode == "multiply":
            return img1 * img2
        elif mode == "screen":
            return 1 - (1 - img1) * (1 - img2)
        elif mode == "overlay":
            return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
        elif mode == "soft_light":
            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1))
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

    def g(self, x):
        return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))


class Blur:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "blur_radius": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 31,
                    "step": 1
                }),
                "sigma": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.1,
                    "max": 10.0,
                    "step": 0.1
                }),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blur"

    CATEGORY = "image/postprocessing"

    def gaussian_kernel(self, kernel_size: int, sigma: float):
        x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij")
        d = torch.sqrt(x * x + y * y)
        g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
        return g / g.sum()

    def blur(self, image: torch.Tensor, blur_radius: int, sigma: float):
        if blur_radius == 0:
            return (image,)

        batch_size, height, width, channels = image.shape

        kernel_size = blur_radius * 2 + 1
        kernel = self.gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1)

        image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C)
        blurred = F.conv2d(image, kernel, padding=kernel_size // 2, groups=channels)
        blurred = blurred.permute(0, 2, 3, 1)

        return (blurred,)


class Quantize:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "colors": ("INT", {
                    "default": 256,
                    "min": 1,
                    "max": 256,
                    "step": 1
                }),
                "dither": (["none", "floyd-steinberg"],),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "quantize"

    CATEGORY = "image/postprocessing"

    def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"):
        batch_size, height, width, _ = image.shape
        result = torch.zeros_like(image)

        dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE

        for b in range(batch_size):
            tensor_image = image[b]
            img = (tensor_image * 255).to(torch.uint8).numpy()
            pil_image = Image.fromarray(img, mode='RGB')

            palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836
            quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option)

            quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
            result[b] = quantized_array

        return (result,)


class Sharpen:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "sharpen_radius": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 31,
                    "step": 1
                }),
                "alpha": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.1,
                    "max": 5.0,
                    "step": 0.1
                }),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "sharpen"

    CATEGORY = "image/postprocessing"

    def sharpen(self, image: torch.Tensor, sharpen_radius: int, alpha: float):
        if sharpen_radius == 0:
            return (image,)

        batch_size, height, width, channels = image.shape

        kernel_size = sharpen_radius * 2 + 1
        kernel = torch.ones((kernel_size, kernel_size), dtype=torch.float32) * -1
        center = kernel_size // 2
        kernel[center, center] = kernel_size**2
        kernel *= alpha
        kernel = kernel.repeat(channels, 1, 1).unsqueeze(1)

        tensor_image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C)
        sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels)
        sharpened = sharpened.permute(0, 2, 3, 1)

        result = torch.clamp(sharpened, 0, 1)

        return (result,)


NODE_CLASS_MAPPINGS = {
    "ImageBlend": Blend,
    "ImageBlur": Blur,
    "ImageQuantize": Quantize,
    "ImageSharpen": Sharpen,
}
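As a quick illustration (not part of the commit), the following sketch runs three of the new nodes directly on random tensors. It assumes the ComfyUI repository root is on PYTHONPATH so that comfy_extras.nodes_post_processing and comfy.utils import cleanly, and that an IMAGE tensor is float32 in [0, 1] with shape (batch, height, width, channels), as the code above expects.

# Standalone smoke test (illustrative only, not part of the commit).
import torch
from comfy_extras.nodes_post_processing import Blend, Blur, Sharpen

img1 = torch.rand(1, 64, 64, 3)   # (B, H, W, C), values in [0, 1]
img2 = torch.rand(1, 64, 64, 3)

blended, = Blend().blend_images(img1, img2, blend_factor=0.5, blend_mode="overlay")
blurred, = Blur().blur(blended, blur_radius=3, sigma=1.0)
sharpened, = Sharpen().sharpen(blurred, sharpen_radius=1, alpha=1.0)
print(sharpened.shape)  # torch.Size([1, 64, 64, 3])

Note that blend_images resizes image2 with comfy.utils.common_upscale when the two shapes differ, so the inputs do not have to match exactly.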
nodes.py
@@ -197,7 +197,7 @@ class CheckpointLoader:
     RETURN_TYPES = ("MODEL", "CLIP", "VAE")
     FUNCTION = "load_checkpoint"

-    CATEGORY = "loaders"
+    CATEGORY = "advanced/loaders"

     def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
         config_path = folder_paths.get_full_path("configs", config_name)

@@ -227,7 +227,7 @@ class unCLIPCheckpointLoader:
     RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
     FUNCTION = "load_checkpoint"

-    CATEGORY = "_for_testing/unclip"
+    CATEGORY = "loaders"

     def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
         ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)

@@ -450,7 +450,7 @@ class unCLIPConditioning:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "apply_adm"

-    CATEGORY = "_for_testing/unclip"
+    CATEGORY = "conditioning"

     def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
         c = []

@@ -1038,7 +1038,6 @@ class ImagePadForOutpaint:
 NODE_CLASS_MAPPINGS = {
     "KSampler": KSampler,
-    "CheckpointLoader": CheckpointLoader,
     "CheckpointLoaderSimple": CheckpointLoaderSimple,
     "CLIPTextEncode": CLIPTextEncode,
     "CLIPSetLastLayer": CLIPSetLastLayer,

@@ -1077,6 +1076,7 @@ NODE_CLASS_MAPPINGS = {
     "VAEEncodeTiled": VAEEncodeTiled,
     "TomePatchModel": TomePatchModel,
     "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
+    "CheckpointLoader": CheckpointLoader,
 }

 def load_custom_node(module_path):

@@ -1113,4 +1113,5 @@ def load_custom_nodes():
 def init_custom_nodes():
     load_custom_nodes()
-    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
\ No newline at end of file
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
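For reference, a simplified, illustrative sketch of the registration contract that the extra load_custom_node() call above relies on: the module file is imported from its path and its NODE_CLASS_MAPPINGS dict is merged into the global node registry. The helper below is a hypothetical stand-in, not the actual implementation in nodes.py.

# Illustrative stand-in for the registration step (not the real load_custom_node).
import importlib.util
import os

NODE_CLASS_MAPPINGS = {}  # stands in for the global registry kept by nodes.py

def load_custom_node_sketch(module_path):
    # Derive a module name from the file name and import the file directly.
    module_name = os.path.splitext(os.path.basename(module_path))[0]
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # Node modules such as nodes_post_processing.py expose NODE_CLASS_MAPPINGS,
    # e.g. {"ImageBlend": Blend, "ImageBlur": Blur, ...}; merge it in.
    if hasattr(module, "NODE_CLASS_MAPPINGS"):
        NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)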
web/style.css
@@ -237,3 +237,28 @@ button.comfy-queue-btn {
 		visibility: hidden
 	}
 }

+.graphdialog {
+	min-height: 1em;
+}
+
+.graphdialog .name {
+	font-size: 14px;
+	font-family: sans-serif;
+	color: #999999;
+}
+
+.graphdialog button {
+	margin-top: unset;
+	vertical-align: unset;
+	height: 1.6em;
+	padding-right: 8px;
+}
+
+.graphdialog input, .graphdialog textarea, .graphdialog select {
+	background-color: #222;
+	border: 2px solid;
+	border-color: #444444;
+	color: #ddd;
+	border-radius: 12px 0 0 12px;
+}