xuwx1 / LightX2V

Commit c926b08a, authored Jun 26, 2025 by gushiqiao

    Fix.

Parent: b811c2be
Showing 2 changed files with 5 additions and 6 deletions

  lightx2v/common/ops/norm/layer_norm_weight.py   +4  -4
  lightx2v/models/networks/wan/model.py           +1  -2
lightx2v/common/ops/norm/layer_norm_weight.py

@@ -94,9 +94,7 @@ class LNWeight(LNWeightTemplate):
         self.bias = None
 
     def apply(self, input_tensor):
-        if self.weight is None or self.weight.dtype == torch.bfloat16:
-            input_tensor = torch.nn.functional.layer_norm(input_tensor, (input_tensor.shape[-1],), self.weight, self.bias, self.eps)
-        else:
-            input_tensor = torch.nn.functional.layer_norm(
+        if GET_DTYPE() != "BF16":
+            input_tensor = torch.nn.functional.layer_norm(
                 input_tensor.float(),
                 (input_tensor.shape[-1],),
@@ -104,4 +102,6 @@ class LNWeight(LNWeightTemplate):
                 self.bias,
                 self.eps,
             ).to(torch.bfloat16)
+        else:
+            input_tensor = torch.nn.functional.layer_norm(input_tensor, (input_tensor.shape[-1],), self.weight, self.bias, self.eps)
         return input_tensor
\ No newline at end of file
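
For readers following the hunk above, here is a minimal self-contained sketch of the dtype handling that apply() now performs. GET_DTYPE below is a stand-in stub for lightx2v's runtime dtype getter, and the weight/bias handling is simplified, so treat this as an illustration of the control flow rather than the module's actual code.

import torch

def GET_DTYPE():
    # Stand-in stub: the real project reads this from its runtime config.
    return "BF16"

def apply_layer_norm(input_tensor, weight=None, bias=None, eps=1e-6):
    # Mirrors the branch structure of LNWeight.apply after this commit.
    if GET_DTYPE() != "BF16":
        # Run the normalization in float32, then cast the result to bfloat16.
        input_tensor = torch.nn.functional.layer_norm(
            input_tensor.float(),
            (input_tensor.shape[-1],),
            weight,
            bias,
            eps,
        ).to(torch.bfloat16)
    else:
        # Runtime dtype is BF16: normalize the tensor in its own dtype.
        input_tensor = torch.nn.functional.layer_norm(input_tensor, (input_tensor.shape[-1],), weight, bias, eps)
    return input_tensor

x = torch.randn(2, 8, dtype=torch.bfloat16)
print(apply_layer_norm(x).dtype)  # torch.bfloat16
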
lightx2v/models/networks/wan/model.py

@@ -143,9 +143,8 @@ class WanModel:
     def _init_weights(self, weight_dict=None):
         use_bf16 = GET_DTYPE() == "BF16"
         # Some layers run with float32 to achieve high accuracy
-        skip_bf16 = {"norm", "embedding", "modulation", "time"}
+        skip_bf16 = {"norm", "embedding", "modulation", "time", "img_emb.proj.0", "img_emb.proj.4"}
         if weight_dict is None:
             if not self.dit_quantized or self.weight_auto_quant:
                 self.original_weight_dict = self._load_ckpt(use_bf16, skip_bf16)
...
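
The skip_bf16 set is passed to _load_ckpt, whose body is not part of this diff. The sketch below only illustrates, under the assumption of simple substring matching on parameter names, how such a set could keep precision-sensitive tensors in float32 while the rest are cast to bfloat16. cast_state_dict and the parameter names are hypothetical, introduced here for illustration.

import torch

def cast_state_dict(state_dict, use_bf16, skip_bf16):
    # Keep tensors whose names match the skip set in float32; cast the rest
    # to bfloat16 when the runtime dtype is BF16. Hypothetical helper, not
    # code from the repository.
    out = {}
    for name, tensor in state_dict.items():
        keep_fp32 = any(key in name for key in skip_bf16)
        if use_bf16 and not keep_fp32:
            out[name] = tensor.to(torch.bfloat16)
        else:
            out[name] = tensor.to(torch.float32)
    return out

skip_bf16 = {"norm", "embedding", "modulation", "time", "img_emb.proj.0", "img_emb.proj.4"}
weights = {
    "blocks.0.self_attn.q.weight": torch.randn(4, 4),  # hypothetical names
    "img_emb.proj.0.weight": torch.randn(4, 4),
}
print({k: v.dtype for k, v in cast_state_dict(weights, True, skip_bf16).items()})
# {'blocks.0.self_attn.q.weight': torch.bfloat16, 'img_emb.proj.0.weight': torch.float32}
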