chenpangpang / ComfyUI · commit 42152062

Commit 42152062, authored Mar 03, 2023 by comfyanonymous (parent: fed315a7)

Add a node to set CLIP skip.
Use a simpler way to detect whether the model is v-prediction.

Showing 3 changed files with 27 additions and 9 deletions (+27 -9)
comfy/ldm/models/diffusion/ddpm.py   +1 -1
comfy/sd.py                          +9 -8
nodes.py                             +17 -0
comfy/ldm/models/diffusion/ddpm.py

@@ -81,7 +81,7 @@ class DDPM(torch.nn.Module):
         super().__init__()
         assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
         self.parameterization = parameterization
-        #print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
         self.cond_stage_model = None
         self.clip_denoised = clip_denoised
         self.log_every_t = log_every_t
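With the comfy/sd.py change below, the v-prediction flag is decided from the checkpoint before the model is instantiated, so the mode reported by this re-enabled print is accurate again. A minimal sketch of that ordering, using illustrative stand-ins rather than ComfyUI's own classes:

```python
# Illustrative stand-ins (not part of the commit): the parameterization is
# chosen from the checkpoint first, then passed into the constructor, so the
# print in __init__ reflects the final value.
class DDPMStub:
    def __init__(self, parameterization="eps"):
        assert parameterization in ["eps", "x0", "v"]
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")

sd_config = {"parameterization": "eps"}
detected_v_prediction = True          # stand-in for the state-dict heuristic in comfy/sd.py
if detected_v_prediction:
    sd_config["parameterization"] = "v"

DDPMStub(**sd_config)                 # prints "DDPMStub: Running in v-prediction mode"
```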
comfy/sd.py

@@ -266,6 +266,7 @@ class CLIP:
         self.cond_stage_model = clip(**(params))
         self.tokenizer = tokenizer(embedding_directory=embedding_directory)
         self.patcher = ModelPatcher(self.cond_stage_model)
+        self.layer_idx = -1

     def clone(self):
         n = CLIP(no_init=True)
@@ -273,6 +274,7 @@ class CLIP:
         n.patcher = self.patcher.clone()
         n.cond_stage_model = self.cond_stage_model
         n.tokenizer = self.tokenizer
+        n.layer_idx = self.layer_idx
         return n

     def load_from_state_dict(self, sd):
@@ -282,9 +284,10 @@ class CLIP:
         return self.patcher.add_patches(patches, strength)

     def clip_layer(self, layer_idx):
-        return self.cond_stage_model.clip_layer(layer_idx)
+        self.layer_idx = layer_idx

     def encode(self, text):
+        self.cond_stage_model.clip_layer(self.layer_idx)
         tokens = self.tokenizer.tokenize_with_weights(text)
         try:
             self.patcher.patch_model()
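With these changes the wrapper records the requested layer index and pushes it down to the underlying model on every encode() call, so clones carry their own CLIP-skip setting and nothing is applied until encoding actually happens. A minimal sketch of that deferred-setter pattern, with illustrative stand-in classes rather than ComfyUI's API:

```python
# Illustrative stand-ins (not ComfyUI classes) for the deferred clip-skip pattern.
class InnerClip:
    def __init__(self):
        self.stop_at_layer = -1
    def clip_layer(self, layer_idx):
        self.stop_at_layer = layer_idx
    def encode(self, text):
        return f"{text!r} encoded, stopping at layer {self.stop_at_layer}"

class ClipWrapper:
    def __init__(self, model):
        self.model = model
        self.layer_idx = -1                      # default: use the last layer
    def clone(self):
        n = ClipWrapper(self.model)              # clones share the model...
        n.layer_idx = self.layer_idx             # ...but copy the skip setting
        return n
    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx               # just record it; applied lazily
    def encode(self, text):
        self.model.clip_layer(self.layer_idx)    # re-apply right before encoding
        return self.model.encode(text)

clip = ClipWrapper(InnerClip())
skipped = clip.clone()
skipped.clip_layer(-2)                           # "CLIP skip 2"
print(clip.encode("a photo"))                    # stops at -1
print(skipped.encode("a photo"))                 # stops at -2
```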
comfy/sd.py (continued)

@@ -744,15 +747,13 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, e
     else:
         unet_config["num_heads"] = 8 #SD1.x

+    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
+        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
+        out = sd[k]
+        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
+            sd_config["parameterization"] = 'v'
+
     model = instantiate_from_config(model_config)
     model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

-    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
-        cond = torch.zeros((1, 2, unet_config["context_dim"]), device="cpu")
-        x_in = torch.rand((1, unet_config["in_channels"], 8, 8), device="cpu", generator=torch.manual_seed(1))
-        out = model.apply_model(x_in, torch.tensor([999], device="cpu"), cond)
-        if out.mean() < -0.6: #mean of eps should be ~0 and mean of v prediction should be ~-1
-            model.parameterization = 'v'
-
     return (ModelPatcher(model), clip, vae)
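The replacement heuristic inspects only the checkpoint weights: for SD2.x-shaped configs (context_dim 1024, 4 input channels) it thresholds the spread of one output-block norm bias instead of running a forward pass and checking the output mean. A rough standalone sketch of that check, reusing the 0.09 threshold from the diff; the loading code and path are illustrative assumptions:

```python
import torch

def guess_v_prediction(sd):
    # Heuristic from the diff above: if this norm bias has an unusually large
    # spread, assume the SD2.x checkpoint was trained with v-prediction.
    # The 0.09 threshold comes from the commit; its own comment notes it is
    # untested ("I guess we will find out").
    k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
    if k not in sd:
        return False
    return torch.std(sd[k], unbiased=False).item() > 0.09

# Illustrative usage; the path and the "state_dict" wrapper depend on the checkpoint.
# sd = torch.load("models/checkpoints/v2-1_768-ema-pruned.ckpt", map_location="cpu")["state_dict"]
# print(guess_v_prediction(sd))
```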
nodes.py

@@ -220,6 +220,22 @@ class CheckpointLoaderSimple:
         out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
         return out

+class CLIPSetLastLayer:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "clip": ("CLIP", ),
+                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
+                              }}
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "set_last_layer"
+
+    CATEGORY = "conditioning"
+
+    def set_last_layer(self, clip, stop_at_clip_layer):
+        clip = clip.clone()
+        clip.clip_layer(stop_at_clip_layer)
+        return (clip,)
+
 class LoraLoader:
     models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
     lora_dir = os.path.join(models_dir, "loras")
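Because the node clones the CLIP object and stores the skip value on the clone, the original CLIP is left untouched. A hedged sketch of driving the node headlessly, assuming load_checkpoint_guess_config's remaining parameters can stay at their defaults; the checkpoint path is illustrative:

```python
import comfy.sd
from nodes import CLIPSetLastLayer

# Illustrative checkpoint path; any SD1.x/SD2.x checkpoint should work.
model_patcher, clip, vae = comfy.sd.load_checkpoint_guess_config(
    "models/checkpoints/v1-5-pruned-emaonly.ckpt",
    output_vae=True, output_clip=True)

node = CLIPSetLastLayer()
(clip_skipped,) = node.set_last_layer(clip, stop_at_clip_layer=-2)

cond_skipped = clip_skipped.encode("a photo of a cat")  # stops 2 layers early
cond_full = clip.encode("a photo of a cat")             # original clip unchanged
```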
nodes.py (continued)

@@ -829,6 +845,7 @@ NODE_CLASS_MAPPINGS = {
     "KSampler": KSampler,
     "CheckpointLoader": CheckpointLoader,
     "CLIPTextEncode": CLIPTextEncode,
+    "CLIPSetLastLayer": CLIPSetLastLayer,
     "VAEDecode": VAEDecode,
     "VAEEncode": VAEEncode,
     "VAEEncodeForInpaint": VAEEncodeForInpaint,