xuwx1 / LightX2V · Commit f804574f
"...text-generation-inference.git" did not exist on "7a48a84784b74f9ea12cf04a3c4572c027cec2e4"
Authored May 23, 2025 by GoatWu

bug fixed

Parent: 2f874771
Showing 1 changed file with 6 additions and 27 deletions.
lightx2v/models/networks/wan/infer/causvid/transformer_infer.py (+6, −27) @ f804574f
@@ -90,7 +90,7 @@ class WanTransformerInferCausVid(WanTransformerInfer):
                kv_end,
            )
        return x

    def _infer_self_attn(self, weights, grid_sizes, embed, x, embed0, seq_lens, freqs, context, block_idx, kv_start, kv_end):
        norm1_out = torch.nn.functional.layer_norm(x, (x.shape[1],), None, None, 1e-6)
        norm1_out = (norm1_out * (1 + embed0[1]) + embed0[0]).squeeze(0)
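The self-attention path above uses an AdaLN-style pattern: layer_norm is called with weight=None and bias=None (no learned affine), and the scale/shift come from the conditioning embedding instead. A minimal standalone sketch of that pattern; the shapes are illustrative assumptions, not values from the repo:

    import torch
    import torch.nn.functional as F

    def modulated_norm(x, shift, scale, eps=1e-6):
        # F.layer_norm with weight=None and bias=None is pure standardization
        # over the channel dim; the affine part comes from the conditioning.
        normed = F.layer_norm(x, (x.shape[1],), None, None, eps)
        return normed * (1 + scale) + shift

    # Illustrative shapes (assumptions, not taken from the repo):
    x = torch.randn(128, 1536)        # (seq_len, dim)
    shift = torch.randn(1, 1, 1536)   # stands in for embed0[0]
    scale = torch.randn(1, 1, 1536)   # stands in for embed0[1]
    out = modulated_norm(x, shift, scale).squeeze(0)  # mirrors the .squeeze(0) above
    assert out.shape == x.shape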
@@ -194,7 +194,7 @@ class WanTransformerInferCausVid(WanTransformerInfer):
        x = x + attn_out
        return x

    def _infer_ffn(self, weights, x, embed0):
        norm2_out = torch.nn.functional.layer_norm(x, (x.shape[1],), None, None, 1e-6)
        y = weights.ffn_0.apply(norm2_out * (1 + embed0[4].squeeze(0)) + embed0[3].squeeze(0))
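Both _infer_self_attn and _infer_ffn call torch.nn.functional.layer_norm(x, (x.shape[1],), None, None, 1e-6), which for a 2-D (seq_len, dim) activation is plain per-token standardization over the channel dim (x.shape[1] is the last dim in that layout). A quick check of that equivalence, assuming the 2-D layout:

    import torch
    import torch.nn.functional as F

    x = torch.randn(16, 64)  # (seq_len, dim); assumed layout
    eps = 1e-6

    a = F.layer_norm(x, (x.shape[1],), None, None, eps)
    # Manual standardization over the last dim (layer_norm uses biased variance).
    mu = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    b = (x - mu) / torch.sqrt(var + eps)

    assert torch.allclose(a, b, atol=1e-5)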
@@ -204,7 +204,6 @@ class WanTransformerInferCausVid(WanTransformerInfer):
        return x

    def infer_block(self, weights, grid_sizes, embed, x, embed0, seq_lens, freqs, context, block_idx, kv_start, kv_end):
        if embed0.dim() == 3:
            modulation = weights.modulation.tensor.unsqueeze(2)  # 1, 6, 1, dim
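The dim() == 3 branch reshapes the learned modulation table so it broadcasts against conditioning that carries an extra axis; per the inline comment, unsqueeze(2) takes the table to shape (1, 6, 1, dim). A toy broadcast demo; the concrete sizes, and reading the extra axis as a per-frame time axis, are assumptions:

    import torch

    dim, T = 1536, 21
    modulation = torch.randn(1, 6, dim)  # stands in for weights.modulation.tensor
    embed0 = torch.randn(1, 6, T, dim)   # conditioning with an assumed time axis

    # unsqueeze(2) -> (1, 6, 1, dim), matching the "# 1, 6, 1, dim" comment,
    # so addition broadcasts the same table across all T steps.
    out = modulation.unsqueeze(2) + embed0
    assert out.shape == (1, 6, T, dim)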
@@ -214,30 +213,10 @@ class WanTransformerInferCausVid(WanTransformerInfer):
        elif embed0.dim() == 2:
            embed0 = (weights.modulation.tensor + embed0).chunk(6, dim=1)

        x = self._infer_self_attn(weights.compute_phases[0], grid_sizes, embed, x, embed0, seq_lens, freqs, context, block_idx, kv_start, kv_end)
        x = self._infer_cross_attn(weights.compute_phases[1], x, context, block_idx)
        x = self._infer_ffn(weights.compute_phases[2], x, embed0)
        return x
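In the dim() == 2 path, a single chunk(6, dim=1) splits the summed modulation into six per-layer signals. From the usage visible in this diff, embed0[0] and embed0[1] are the shift and scale for the self-attention norm, and embed0[3] and embed0[4] the shift and scale for the FFN norm; reading embed0[2] and embed0[5] as the matching gates follows the usual six-way AdaLN layout and is an assumption, not something these hunks show. A toy walkthrough of the split and one modulated norm:

    import torch

    dim, seq = 1536, 128
    modulation = torch.randn(1, 6, dim)  # stands in for weights.modulation.tensor
    t_embed = torch.randn(6, dim)        # 2-D conditioning, the dim() == 2 case

    # Six tensors of shape (1, 1, dim); gate naming is an assumption (see above).
    chunks = (modulation + t_embed).chunk(6, dim=1)
    shift_attn, scale_attn, gate_attn, shift_ffn, scale_ffn, gate_ffn = chunks

    x = torch.randn(seq, dim)
    normed = torch.nn.functional.layer_norm(x, (x.shape[1],), None, None, 1e-6)
    # Matches the use of embed0[0]/embed0[1] in _infer_self_attn above.
    modulated = (normed * (1 + scale_attn) + shift_attn).squeeze(0)
    assert modulated.shape == (seq, dim)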