chenpangpang / ComfyUI · Commits

Commit 4c7a9dbc
authored Apr 02, 2023 by EllangoK

adds Blend, Blur, Dither, Sharpen nodes

parent 72f9235a
Showing 2 changed files with 217 additions and 1 deletion (+217 / -1):

- comfy_extras/nodes_post_processing.py (+215 / -0)
- nodes.py (+2 / -1)
comfy_extras/nodes_post_processing.py (new file, mode 100644)
```python
import torch
import torch.nn.functional as F


class Blend:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image1": ("IMAGE",),
                "image2": ("IMAGE",),
                "blend_factor": ("FLOAT", {
                    "default": 0.5,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01
                }),
                "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blend_images"

    CATEGORY = "postprocessing"

    def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
        blended_image = self.blend_mode(image1, image2, blend_mode)
        blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
        blended_image = torch.clamp(blended_image, 0, 1)
        return (blended_image,)

    def blend_mode(self, img1, img2, mode):
        if mode == "normal":
            return img2
        elif mode == "multiply":
            return img1 * img2
        elif mode == "screen":
            return 1 - (1 - img1) * (1 - img2)
        elif mode == "overlay":
            return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
        elif mode == "soft_light":
            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1))
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")

    def g(self, x):
        return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))


class Blur:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "blur_radius": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 31,
                    "step": 1
                }),
                "sigma": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.1,
                    "max": 10.0,
                    "step": 0.1
                }),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blur"

    CATEGORY = "postprocessing"

    def gaussian_kernel(self, kernel_size: int, sigma: float):
        x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij")
        d = torch.sqrt(x * x + y * y)
        g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
        return g / g.sum()

    def blur(self, image: torch.Tensor, blur_radius: int, sigma: float):
        if blur_radius == 0:
            return (image,)

        batch_size, height, width, channels = image.shape

        kernel_size = blur_radius * 2 + 1
        kernel = self.gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1)

        image = image.permute(0, 3, 1, 2)  # Torch wants (B, C, H, W) we use (B, H, W, C)
        blurred = F.conv2d(image, kernel, padding=kernel_size // 2, groups=channels)
        blurred = blurred.permute(0, 2, 3, 1)

        return (blurred,)


class Dither:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "bits": ("INT", {
                    "default": 4,
                    "min": 1,
                    "max": 8,
                    "step": 1
                }),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "dither"

    CATEGORY = "postprocessing"

    def dither(self, image: torch.Tensor, bits: int):
        batch_size, height, width, _ = image.shape
        result = torch.zeros_like(image)

        for b in range(batch_size):
            tensor_image = image[b]
            img = (tensor_image * 255)
            height, width, _ = img.shape

            scale = 255 / (2**bits - 1)

            for y in range(height):
                for x in range(width):
                    old_pixel = img[y, x].clone()
                    new_pixel = torch.round(old_pixel / scale) * scale
                    img[y, x] = new_pixel

                    quant_error = old_pixel - new_pixel

                    if x + 1 < width:
                        img[y, x + 1] += quant_error * 7 / 16
                    if y + 1 < height:
                        if x - 1 >= 0:
                            img[y + 1, x - 1] += quant_error * 3 / 16
                        img[y + 1, x] += quant_error * 5 / 16
                        if x + 1 < width:
                            img[y + 1, x + 1] += quant_error * 1 / 16

            dithered = img / 255
            tensor = dithered.unsqueeze(0)
            result[b] = tensor

        return (result,)


class Sharpen:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "sharpen_radius": ("INT", {
                    "default": 1,
                    "min": 1,
                    "max": 31,
                    "step": 1
                }),
                "alpha": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.1,
                    "max": 5.0,
                    "step": 0.1
                }),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "sharpen"

    CATEGORY = "postprocessing"

    def sharpen(self, image: torch.Tensor, sharpen_radius: int, alpha: float):
        if sharpen_radius == 0:
            return (image,)

        batch_size, height, width, channels = image.shape

        kernel_size = sharpen_radius * 2 + 1
        kernel = torch.ones((kernel_size, kernel_size), dtype=torch.float32) * -1
        center = kernel_size // 2
        kernel[center, center] = kernel_size**2
        kernel *= alpha
        kernel = kernel.repeat(channels, 1, 1).unsqueeze(1)

        tensor_image = image.permute(0, 3, 1, 2)  # Torch wants (B, C, H, W) we use (B, H, W, C)
        sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels)
        sharpened = sharpened.permute(0, 2, 3, 1)

        result = torch.clamp(sharpened, 0, 1)

        return (result,)


NODE_CLASS_MAPPINGS = {
    "Blend": Blend,
    "Blur": Blur,
    "Dither": Dither,
    "Sharpen": Sharpen,
}
```
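For context (not part of the commit), here is a minimal sketch of how the new node classes could be exercised outside a ComfyUI graph, assuming a ComfyUI-style IMAGE tensor: float values in [0, 1] with shape (batch, height, width, channels), as the permute comments in the file indicate. The import path is illustrative; ComfyUI loads this file through load_custom_node rather than as a regular package import.

```python
# Illustrative only: drive the new nodes directly on a random IMAGE tensor.
# Assumes IMAGE tensors are float32, shape (B, H, W, C), values in [0, 1].
import torch

from comfy_extras.nodes_post_processing import Blend, Blur, Sharpen  # path assumed

image = torch.rand(1, 64, 64, 3)  # (B, H, W, C)
other = torch.rand(1, 64, 64, 3)

# Each node returns a one-element tuple, matching RETURN_TYPES = ("IMAGE",).
blurred,   = Blur().blur(image, blur_radius=3, sigma=1.5)            # 7x7 Gaussian kernel
sharpened, = Sharpen().sharpen(image, sharpen_radius=1, alpha=1.0)   # 3x3 sharpening kernel
blended,   = Blend().blend_images(image, other, blend_factor=0.5, blend_mode="screen")

print(blurred.shape, sharpened.shape, blended.shape)  # each stays (1, 64, 64, 3)
```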
nodes.py

```diff
@@ -1112,4 +1112,5 @@ def load_custom_nodes():
 def init_custom_nodes():
     load_custom_nodes()
-    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
\ No newline at end of file
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
```
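A side note on the hunk above (an observation, not a change made by the commit): os.path.join accepts any number of path components, so the nested join calls are equivalent to a flatter form, sketched here for illustration.

```python
import os

# Equivalent path construction to the nested os.path.join calls above (illustrative).
extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
post_processing_path = os.path.join(extras_dir, "nodes_post_processing.py")
```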