chenpangpang / ComfyUI · Commits · 0e425603
Commit 0e425603 authored Jun 06, 2023 by comfyanonymous

Small refactor.

parent a3a713b6
Showing 2 changed files with 17 additions and 16 deletions (+17 -16):

  comfy/sd.py     +5 -16
  comfy/utils.py  +12 -0
comfy/sd.py
@@ -31,17 +31,6 @@ def load_model_weights(model, sd, verbose=False, load_state_dict_to=[]):
     if ids.dtype == torch.float32:
         sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()
 
-    keys_to_replace = {
-        "cond_stage_model.model.positional_embedding": "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight",
-        "cond_stage_model.model.token_embedding.weight": "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight",
-        "cond_stage_model.model.ln_final.weight": "cond_stage_model.transformer.text_model.final_layer_norm.weight",
-        "cond_stage_model.model.ln_final.bias": "cond_stage_model.transformer.text_model.final_layer_norm.bias",
-    }
-
-    for x in keys_to_replace:
-        if x in sd:
-            sd[keys_to_replace[x]] = sd.pop(x)
-
     sd = utils.transformers_convert(sd, "cond_stage_model.model", "cond_stage_model.transformer.text_model", 24)
 
     for x in load_state_dict_to:
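Note that the utils.transformers_convert call is unchanged context here, so this hunk is a pure deduplication: the deleted mapping reappears in comfy/utils.py (second file below) as "{}"-format templates. A minimal sketch of that equivalence, using only names visible in this diff; the dict comprehension and assert are illustrative, not part of the commit:

    # Formatting the generic templates from comfy/utils.py with the prefixes
    # passed in the surviving call reproduces the exact mapping deleted above.
    prefix_from = "cond_stage_model.model"
    prefix_to = "cond_stage_model.transformer.text_model"

    generic = {
        "{}.positional_embedding": "{}.embeddings.position_embedding.weight",
        "{}.token_embedding.weight": "{}.embeddings.token_embedding.weight",
        "{}.ln_final.weight": "{}.final_layer_norm.weight",
        "{}.ln_final.bias": "{}.final_layer_norm.bias",
    }

    specialized = {k.format(prefix_from): v.format(prefix_to) for k, v in generic.items()}
    assert specialized["cond_stage_model.model.positional_embedding"] == \
        "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight"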
@@ -1073,13 +1062,13 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         "legacy": False
     }
 
-    if len(sd['model.diffusion_model.input_blocks.1.1.proj_in.weight'].shape) == 2:
+    if len(sd['model.diffusion_model.input_blocks.4.1.proj_in.weight'].shape) == 2:
         unet_config['use_linear_in_transformer'] = True
 
     unet_config["use_fp16"] = fp16
     unet_config["model_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[0]
     unet_config["in_channels"] = sd['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
-    unet_config["context_dim"] = sd['model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight'].shape[1]
+    unet_config["context_dim"] = sd['model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight'].shape[1]
 
     sd_config["unet_config"] = {"target": "comfy.ldm.modules.diffusionmodules.openaimodel.UNetModel", "params": unet_config}
     model_config = {"target": "comfy.ldm.models.diffusion.ddpm.LatentDiffusion", "params": sd_config}
@@ -1097,10 +1086,10 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
     else:
         sd_config["conditioning_key"] = "crossattn"
 
-    if unet_config["context_dim"] == 1024:
-        unet_config["num_head_channels"] = 64 #SD2.x
+    if unet_config["context_dim"] == 768:
+        unet_config["num_heads"] = 8 #SD1.x
     else:
-        unet_config["num_heads"] = 8 #SD1.x
+        unet_config["num_head_channels"] = 64 #SD2.x
 
     unclip = 'model.diffusion_model.label_emb.0.0.weight'
     if unclip in sd_keys:
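The two hunks above move the probe key from input_blocks.1 to input_blocks.4 and flip the head heuristic so that context_dim == 768 selects the SD1.x branch and everything else falls through to SD2.x. The width being probed is the input dimension of a cross-attention key projection, which equals the text encoder's hidden size: 768 for the SD1.x CLIP encoder, 1024 for SD2.x. A minimal sketch of the detection, assuming a checkpoint-style state dict of torch tensors; guess_sd_family and the toy tensor are hypothetical, not part of the commit:

    import torch

    def guess_sd_family(sd):
        # to_k.weight has shape (inner_dim, context_dim); shape[1] is the
        # text-encoder width the heuristic above keys on.
        w = sd['model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight']
        return "SD1.x" if w.shape[1] == 768 else "SD2.x"

    # Toy example with a fabricated tensor of SD1.x-like shape:
    toy = {'model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight': torch.zeros(640, 768)}
    print(guess_sd_family(toy))  # SD1.x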
comfy/utils.py
@@ -24,6 +24,18 @@ def load_torch_file(ckpt, safe_load=False):
     return sd
 
 def transformers_convert(sd, prefix_from, prefix_to, number):
+    keys_to_replace = {
+        "{}.positional_embedding": "{}.embeddings.position_embedding.weight",
+        "{}.token_embedding.weight": "{}.embeddings.token_embedding.weight",
+        "{}.ln_final.weight": "{}.final_layer_norm.weight",
+        "{}.ln_final.bias": "{}.final_layer_norm.bias",
+    }
+
+    for k in keys_to_replace:
+        x = k.format(prefix_from)
+        if x in sd:
+            sd[keys_to_replace[k].format(prefix_to)] = sd.pop(x)
+
     resblock_to_replace = {
         "ln_1": "layer_norm1",
         "ln_2": "layer_norm2",
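A short usage sketch of the renaming pattern added above, run against a toy state dict. The tensor values and the standalone loop are illustrative; in the commit this logic lives inside transformers_convert:

    import torch

    sd = {
        "cond_stage_model.model.ln_final.weight": torch.ones(1024),
        "cond_stage_model.model.ln_final.bias": torch.zeros(1024),
        "unrelated.key": torch.zeros(1),  # keys without a template pass through
    }

    keys_to_replace = {
        "{}.ln_final.weight": "{}.final_layer_norm.weight",
        "{}.ln_final.bias": "{}.final_layer_norm.bias",
    }

    prefix_from = "cond_stage_model.model"
    prefix_to = "cond_stage_model.transformer.text_model"

    for k in keys_to_replace:
        x = k.format(prefix_from)
        if x in sd:
            # pop() removes the old key so the rename leaves no duplicate entry
            sd[keys_to_replace[k].format(prefix_to)] = sd.pop(x)

    print(sorted(sd))
    # ['cond_stage_model.transformer.text_model.final_layer_norm.bias',
    #  'cond_stage_model.transformer.text_model.final_layer_norm.weight',
    #  'unrelated.key']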