Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
ComfyUI
Commits
72741105
"tests/vscode:/vscode.git/clone" did not exist on "c0b4d72095b715c518f54d7111b539667320228e"
Commit
72741105
authored
Nov 21, 2023
by
comfyanonymous
Browse files
Remove useless code.
parent
6a491ebe
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
11 additions
and
20 deletions
+11
-20
comfy/ldm/modules/diffusionmodules/openaimodel.py
comfy/ldm/modules/diffusionmodules/openaimodel.py
+11
-20
No files found.
comfy/ldm/modules/diffusionmodules/openaimodel.py
View file @
72741105
...
@@ -28,25 +28,6 @@ class TimestepBlock(nn.Module):
...
@@ -28,25 +28,6 @@ class TimestepBlock(nn.Module):
Apply the module to `x` given `emb` timestep embeddings.
Apply the module to `x` given `emb` timestep embeddings.
"""
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb, context=None, transformer_options=None, output_shape=None):
        """Run `x` through every child, routing extra inputs by child type.

        Args:
            x: input tensor fed through the sequence.
            emb: timestep embedding, given only to ``TimestepBlock`` children.
            context: conditioning passed to ``SpatialTransformer`` children.
            transformer_options: options dict handed to ``SpatialTransformer``
                children. Defaults to a fresh empty dict per call.
            output_shape: forwarded as ``output_shape=`` to ``Upsample`` children.

        Returns:
            The tensor produced by the last child.
        """
        # Fix: the original used a mutable default (`transformer_options={}`),
        # which is shared across calls — downstream code mutates this dict, so
        # state would leak between invocations. Build a fresh dict instead.
        if transformer_options is None:
            transformer_options = {}
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer):
                x = layer(x, context, transformer_options)
            elif isinstance(layer, Upsample):
                x = layer(x, output_shape=output_shape)
            else:
                x = layer(x)
        return x
#This is needed because accelerate makes a copy of transformer_options which breaks "current_index"
def forward_timestep_embed(ts, x, emb, context=None, transformer_options=None, output_shape=None):
    """Apply each layer in `ts` to `x`, routing extra inputs by layer type.

    Module-level (rather than a method) so that the `transformer_options`
    dict passed in is the caller's own object and mutations to it are
    visible to the caller (see the accelerate note above).

    Args:
        ts: iterable of layers (e.g. a ``TimestepEmbedSequential``).
        x: input tensor fed through the layers.
        emb: timestep embedding, given only to ``TimestepBlock`` layers.
        context: conditioning passed to ``SpatialTransformer`` layers.
        transformer_options: options dict handed to ``SpatialTransformer``
            layers; its "current_index" entry, if present, is incremented
            after each transformer. Defaults to a fresh empty dict per call.
        output_shape: forwarded as ``output_shape=`` to ``Upsample`` layers.

    Returns:
        The tensor produced by the last layer.
    """
    # Fix: the original used a mutable default (`transformer_options={}`),
    # shared across calls; since "current_index" is incremented in place,
    # the default dict would accumulate state between invocations.
    if transformer_options is None:
        transformer_options = {}
    for layer in ts:
        if isinstance(layer, TimestepBlock):
            x = layer(x, emb)
        elif isinstance(layer, SpatialTransformer):
            x = layer(x, context, transformer_options)
            # Count transformers as they run — presumably consumed by
            # patching code elsewhere; confirm against callers.
            if "current_index" in transformer_options:
                transformer_options["current_index"] += 1
        elif isinstance(layer, Upsample):
            x = layer(x, output_shape=output_shape)
        else:
            x = layer(x)
    return x
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container whose children may accept extra inputs (timestep
    embedding, context, ...) in addition to the running tensor.

    All per-layer dispatch lives in the module-level
    ``forward_timestep_embed``; this class is only a thin adapter.
    """

    def forward(self, *inputs, **extras):
        # Delegate untouched: positional and keyword arguments are passed
        # straight through to the free function, with this container as `ts`.
        return forward_timestep_embed(self, *inputs, **extras)
class
Upsample
(
nn
.
Module
):
class
Upsample
(
nn
.
Module
):
"""
"""
An upsampling layer with an optional convolution.
An upsampling layer with an optional convolution.
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment