chenpangpang / ComfyUI · Commit 4e345b31
Authored Apr 24, 2023 by comfyanonymous

Support all known hypernetworks.

Parent: f1b87f50
Showing 1 changed file with 26 additions and 4 deletions

comfy_extras/nodes_hypernetwork.py (+26, -4)
@@ -10,7 +10,17 @@ def load_hypernetwork_patch(path, strength):
     activate_output = sd.get('activate_output', False)
     last_layer_dropout = sd.get('last_layer_dropout', False)
 
-    if activation_func != 'linear' or is_layer_norm != False or use_dropout != False or activate_output != False or last_layer_dropout != False:
+    valid_activation = {
+        "linear": torch.nn.Identity,
+        "relu": torch.nn.ReLU,
+        "leakyrelu": torch.nn.LeakyReLU,
+        "elu": torch.nn.ELU,
+        "swish": torch.nn.Hardswish,
+        "tanh": torch.nn.Tanh,
+        "sigmoid": torch.nn.Sigmoid,
+    }
+
+    if activation_func not in valid_activation:
         print("Unsupported Hypernetwork format, if you report it I might implement it.", path, " ", activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)
         return None
@@ -28,15 +38,27 @@ def load_hypernetwork_patch(path, strength):
             keys = attn_weights.keys()
 
             linears = filter(lambda a: a.endswith(".weight"), keys)
-            linears = sorted(list(map(lambda a: a[:-len(".weight")], linears)))
+            linears = list(map(lambda a: a[:-len(".weight")], linears))
             layers = []
 
-            for lin_name in linears:
+            for i in range(len(linears)):
+                lin_name = linears[i]
+                last_layer = (i == (len(linears) - 1))
+                penultimate_layer = (i == (len(linears) - 2))
+
                 lin_weight = attn_weights['{}.weight'.format(lin_name)]
                 lin_bias = attn_weights['{}.bias'.format(lin_name)]
                 layer = torch.nn.Linear(lin_weight.shape[1], lin_weight.shape[0])
                 layer.load_state_dict({"weight": lin_weight, "bias": lin_bias})
-                layers += [layer]
+                layers.append(layer)
+                if activation_func != "linear":
+                    if (not last_layer) or (activate_output):
+                        layers.append(valid_activation[activation_func]())
+                if is_layer_norm:
+                    layers.append(torch.nn.LayerNorm(lin_weight.shape[0]))
+                if use_dropout:
+                    if (not last_layer) and (not penultimate_layer or last_layer_dropout):
+                        layers.append(torch.nn.Dropout(p=0.3))
             output.append(torch.nn.Sequential(*layers))
         out[dim] = torch.nn.ModuleList(output)
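For readers following the change, here is a minimal standalone sketch (not part of the commit) that replays the new per-layer assembly logic on a hypothetical two-layer attention state dict, `fake_attn_weights`, standing in for the weights the loader reads from a real hypernetwork file:

import torch

# Hypothetical state dict for one attention projection: two linear layers
# keyed "0" and "1" (names and shapes are illustrative only).
fake_attn_weights = {
    "0.weight": torch.randn(64, 32), "0.bias": torch.randn(64),
    "1.weight": torch.randn(32, 64), "1.bias": torch.randn(32),
}

# Flags as the loader would read them from the hypernetwork file.
activation_func, is_layer_norm, use_dropout = "relu", False, True
activate_output, last_layer_dropout = False, False
valid_activation = {"linear": torch.nn.Identity, "relu": torch.nn.ReLU}

# Strip the ".weight" suffix to get the layer names, preserving order.
linears = [k[:-len(".weight")] for k in fake_attn_weights if k.endswith(".weight")]

layers = []
for i in range(len(linears)):
    last_layer = (i == len(linears) - 1)
    penultimate_layer = (i == len(linears) - 2)

    w = fake_attn_weights["{}.weight".format(linears[i])]
    b = fake_attn_weights["{}.bias".format(linears[i])]
    layer = torch.nn.Linear(w.shape[1], w.shape[0])
    layer.load_state_dict({"weight": w, "bias": b})
    layers.append(layer)

    # Activation is skipped on the last layer unless activate_output is set.
    if activation_func != "linear":
        if (not last_layer) or activate_output:
            layers.append(valid_activation[activation_func]())
    if is_layer_norm:
        layers.append(torch.nn.LayerNorm(w.shape[0]))
    # Dropout never follows the last layer, and follows the penultimate
    # layer only when last_layer_dropout is set.
    if use_dropout:
        if (not last_layer) and (not penultimate_layer or last_layer_dropout):
            layers.append(torch.nn.Dropout(p=0.3))

mlp = torch.nn.Sequential(*layers)
print(mlp)  # Linear(32->64), ReLU, Linear(64->32)

Indexing with range(len(linears)) instead of iterating over the names directly is what lets the loop know when it has reached the last or penultimate linear, which the activation and dropout rules above depend on.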