chenpangpang / ComfyUI · Commits

Commit 7e941f9f, authored Aug 30, 2023 by comfyanonymous

Clean up DiffusersLoader node.

Parent: 18617967

Showing 2 changed files with 26 additions and 77 deletions:

  comfy/diffusers_load.py   +25  -76
  nodes.py                  +1   -1
comfy/diffusers_load.py  View file @ 7e941f9f

 import json
 import os
-import yaml
-
-import folder_paths
-from comfy.sd import load_checkpoint
-import os.path as osp
-import re
-import torch
-from safetensors.torch import load_file, save_file
-from . import diffusers_convert
-
-def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, embedding_directory=None):
-    diffusers_unet_conf = json.load(open(osp.join(model_path, "unet/config.json")))
-    diffusers_scheduler_conf = json.load(open(osp.join(model_path, "scheduler/scheduler_config.json")))
-
-    # magic
-    v2 = diffusers_unet_conf["sample_size"] == 96
-    if 'prediction_type' in diffusers_scheduler_conf:
-        v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction'
-
-    if v2:
-        if v_pred:
-            config_path = folder_paths.get_full_path("configs", 'v2-inference-v.yaml')
-        else:
-            config_path = folder_paths.get_full_path("configs", 'v2-inference.yaml')
-    else:
-        config_path = folder_paths.get_full_path("configs", 'v1-inference.yaml')
-
-    with open(config_path, 'r') as stream:
-        config = yaml.safe_load(stream)
-
-    model_config_params = config['model']['params']
-    clip_config = model_config_params['cond_stage_config']
-    scale_factor = model_config_params['scale_factor']
-    vae_config = model_config_params['first_stage_config']
-    vae_config['scale_factor'] = scale_factor
-    model_config_params["unet_config"]["params"]["use_fp16"] = fp16
-
-    unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors")
-    vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors")
-    text_enc_path = osp.join(model_path, "text_encoder", "model.safetensors")
-
-    # Load models from safetensors if it exists, if it doesn't pytorch
-    if osp.exists(unet_path):
-        unet_state_dict = load_file(unet_path, device="cpu")
-    else:
-        unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
-        unet_state_dict = torch.load(unet_path, map_location="cpu")
-
-    if osp.exists(vae_path):
-        vae_state_dict = load_file(vae_path, device="cpu")
-    else:
-        vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
-        vae_state_dict = torch.load(vae_path, map_location="cpu")
-
-    if osp.exists(text_enc_path):
-        text_enc_dict = load_file(text_enc_path, device="cpu")
-    else:
-        text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
-        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
-
-    # Convert the UNet model
-    unet_state_dict = diffusers_convert.convert_unet_state_dict(unet_state_dict)
-    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
-
-    # Convert the VAE model
-    vae_state_dict = diffusers_convert.convert_vae_state_dict(vae_state_dict)
-    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
-
-    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
-    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
-
-    if is_v20_model:
-        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
-        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
-        text_enc_dict = diffusers_convert.convert_text_enc_state_dict_v20(text_enc_dict)
-        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
-    else:
-        text_enc_dict = diffusers_convert.convert_text_enc_state_dict(text_enc_dict)
-        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
-
-    # Put together new checkpoint
-    sd = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
-
-    return load_checkpoint(embedding_directory=embedding_directory, state_dict=sd, config=config)
+
+import comfy.sd
+
+def first_file(path, filenames):
+    for f in filenames:
+        p = os.path.join(path, f)
+        if os.path.exists(p):
+            return p
+    return None
+
+def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None):
+    diffusion_model_names = ["diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.bin"]
+    unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names)
+    vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)
+
+    text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
+    text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
+    text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)
+
+    text_encoder_paths = [text_encoder1_path]
+    if text_encoder2_path is not None:
+        text_encoder_paths.append(text_encoder2_path)
+
+    unet = comfy.sd.load_unet(unet_path)
+
+    clip = None
+    if output_clip:
+        clip = comfy.sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory)
+
+    vae = None
+    if output_vae:
+        vae = comfy.sd.VAE(ckpt_path=vae_path)
+
+    return (unet, clip, vae)
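After this commit, load_diffusers hands back the three components directly instead of converting the diffusers state dicts into a single checkpoint and routing it through load_checkpoint. A minimal usage sketch of the new signature (the model directory below is hypothetical; it is expected to contain the unet/, vae/, text_encoder/, and optionally text_encoder_2/ subfolders the loader probes):

    import comfy.diffusers_load

    # Hypothetical path to a diffusers-format model directory.
    model_dir = "models/diffusers/my_model"

    # Returns (unet, clip, vae); clip or vae comes back as None when the
    # matching output_* flag is False.
    unet, clip, vae = comfy.diffusers_load.load_diffusers(
        model_dir,
        output_vae=True,
        output_clip=True,
        embedding_directory=None,  # optionally folder(s) of textual-inversion embeddings
    )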
nodes.py  View file @ 7e941f9f

@@ -475,7 +475,7 @@ class DiffusersLoader:
                     model_path = path
                     break

-        return comfy.diffusers_load.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))

 class unCLIPCheckpointLoader:
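The node-side change only drops the fp16 keyword, so the call site no longer decides precision; presumably that choice now happens inside comfy.sd when the unet is loaded. File selection falls to the new first_file helper, which walks its candidate list in order and returns the first path that exists, so an fp16 safetensors file wins over a full-precision one, and safetensors wins over a pickle .bin. A small self-contained illustration of that fallback (the directory layout here is made up for the example):

    import os
    import tempfile

    def first_file(path, filenames):
        # Same helper as in the diff: the first existing candidate wins.
        for f in filenames:
            p = os.path.join(path, f)
            if os.path.exists(p):
                return p
        return None

    candidates = ["diffusion_pytorch_model.fp16.safetensors",
                  "diffusion_pytorch_model.safetensors",
                  "diffusion_pytorch_model.fp16.bin",
                  "diffusion_pytorch_model.bin"]

    with tempfile.TemporaryDirectory() as d:
        # Only the full-precision safetensors file exists in this fake unet/ dir,
        # so it is the one selected.
        open(os.path.join(d, "diffusion_pytorch_model.safetensors"), "w").close()
        print(first_file(d, candidates))  # .../diffusion_pytorch_model.safetensors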