chenpangpang / ComfyUI · Commit da115bd7

ensure backwards compat with optional args

Authored Apr 14, 2023 by BlenderNeko
Parent: 752f7a16
Showing 3 changed files with 13 additions and 6 deletions:

    comfy/sd.py        +7  -3
    comfy/sd1_clip.py  +5  -1
    nodes.py           +1  -2
comfy/sd.py

@@ -372,12 +372,16 @@ class CLIP:
     def clip_layer(self, layer_idx):
         self.layer_idx = layer_idx
 
-    def tokenize(self, text):
-        return self.tokenizer.tokenize_with_weights(text)
+    def tokenize(self, text, return_word_ids=False):
+        return self.tokenizer.tokenize_with_weights(text, return_word_ids)
 
-    def encode(self, tokens):
+    def encode(self, text, from_tokens=False):
         if self.layer_idx is not None:
             self.cond_stage_model.clip_layer(self.layer_idx)
+        if from_tokens:
+            tokens = text
+        else:
+            tokens = self.tokenizer.tokenize_with_weights(text)
         try:
             self.patcher.patch_model()
             cond = self.cond_stage_model.encode_token_weights(tokens)
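Both new parameters default to the old behavior, which is what keeps existing call sites working. A minimal sketch of the two calling conventions, assuming `clip` is a loaded comfy.sd.CLIP instance (the prompt string is made up):

# Old-style calls are unchanged: the new arguments default off.
tokens = clip.tokenize("a photo of a cat")         # (token, weight) pairs, no word ids
cond   = clip.encode("a photo of a cat")           # text is tokenized internally

# New-style calls opt in explicitly.
tokens = clip.tokenize("a photo of a cat", return_word_ids=True)
cond   = clip.encode(tokens, from_tokens=True)     # skip internal tokenization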
comfy/sd1_clip.py

@@ -240,7 +240,7 @@ class SD1Tokenizer:
         return (embed, "")
 
-    def tokenize_with_weights(self, text:str):
+    def tokenize_with_weights(self, text:str, return_word_ids=False):
         '''
         Takes a prompt and converts it to a list of (token, weight, word id) elements.
         Tokens can both be integer tokens and pre computed CLIP tensors.
@@ -301,6 +301,10 @@ class SD1Tokenizer:
         #add start and end tokens
         batched_tokens = [[(self.start_token, 1.0, 0)] + x + [(self.end_token, 1.0, 0)] for x in batched_tokens]
 
+        if not return_word_ids:
+            batched_tokens = [[(t, w) for t, w, _ in x] for x in batched_tokens]
+
         return batched_tokens
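The default path strips the word ids again before returning, so callers expecting the old (token, weight) pairs are unaffected. A sketch of the shape difference; the token ids and weights below are invented for illustration only:

# With return_word_ids=True each element is a (token, weight, word_id) triple:
triples = [[(49406, 1.0, 0), (2368, 1.0, 1), (49407, 1.0, 0)]]   # hypothetical values

# The default drops the third field, restoring the old (token, weight) pairs:
pairs = [[(t, w) for t, w, _ in x] for x in triples]
assert pairs == [[(49406, 1.0), (2368, 1.0), (49407, 1.0)]]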
nodes.py

@@ -44,8 +44,7 @@ class CLIPTextEncode:
     CATEGORY = "conditioning"
 
     def encode(self, clip, text):
-        tokens = clip.tokenize(text)
-        return ([[clip.encode(tokens), {}]], )
+        return ([[clip.encode(text), {}]], )
 
 class ConditioningCombine:
     @classmethod
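Read together with the comfy/sd.py change, the node no longer tokenizes by hand: with from_tokens left at its default, clip.encode(text) tokenizes internally. A sketch of the equivalence, assuming a loaded `clip` object and a prompt string `text`:

# Equivalent after this commit:
cond_a = clip.encode(text)                                    # the node's one-liner
cond_b = clip.encode(clip.tokenize(text), from_tokens=True)   # explicit two-step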