xuwx1 / LightX2V · Commits

Commit b161b913 (unverified)
fix import error (#573)

Authored Dec 05, 2025 by Gu Shiqiao; committed by GitHub on Dec 05, 2025.
Parent: 0e08595c

6 changed files, with 26 additions and 6 deletions (+26 / -6):
    README_zh.md                                                                     +0 / -1
    lightx2v/models/input_encoders/hf/qwen25/qwen25_vlforconditionalgeneration.py    +6 / -1
    lightx2v/models/input_encoders/hf/wan/t5/model.py                                +5 / -0
    lightx2v/models/input_encoders/hf/wan/xlm_roberta/model.py                       +5 / -1
    lightx2v/models/networks/wan/infer/matrix_game2/transformer_infer.py             +6 / -2
    lightx2v/pipeline.py                                                             +4 / -1
README_zh.md (+0 / -1)

Removes one line near the closing credits block of the Chinese README ("由 LightX2V 团队用 ❤️ 构建" — "Built with ❤️ by the LightX2V team"); the deleted line itself is not visible in the captured page.

    @@ -292,4 +292,3 @@ pipe.generate(
     <div align="center">
     由 LightX2V 团队用 ❤️ 构建
     </div>
lightx2v/models/input_encoders/hf/qwen25/qwen25_vlforconditionalgeneration.py (+6 / -1)

Wraps the transformers import in a try/except so the module still imports when the installed transformers version does not provide the Qwen2.5-VL classes; the names are bound to None instead of raising at import time.

    @@ -3,7 +3,12 @@ import math
     import os
     import torch
    -from transformers import Qwen2Tokenizer, Qwen2_5_VLForConditionalGeneration
    +try:
    +    from transformers import Qwen2Tokenizer, Qwen2_5_VLForConditionalGeneration
    +except ImportError:
    +    Qwen2Tokenizer = None
    +    Qwen2_5_VLForConditionalGeneration = None
    +
     from lightx2v_platform.base.global_var import AI_DEVICE
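This deferred-failure pattern only works if call sites check the sentinel before using the classes. A minimal sketch of such a guard, assuming a hypothetical loader function (the function name and error text are illustrative, not from the repository):

    # Hypothetical call-site guard; Qwen2_5_VLForConditionalGeneration is None
    # in this module when the optional import above failed.
    def load_qwen25_vl(model_path: str):
        if Qwen2_5_VLForConditionalGeneration is None:
            raise ImportError(
                "Qwen2.5-VL support needs a transformers release that ships "
                "Qwen2_5_VLForConditionalGeneration; upgrade transformers to use this encoder."
            )
        return Qwen2_5_VLForConditionalGeneration.from_pretrained(model_path)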
lightx2v/models/input_encoders/hf/wan/t5/model.py (+5 / -0)

Adds VllmQuantLinearFp8 to the quantized-linear imports and introduces an "fp8-vllm" quantization-scheme branch in both T5Attention and T5FeedForward.

    @@ -24,6 +24,7 @@ from lightx2v.models.input_encoders.hf.q_linear import (  # noqa E402
         SglQuantLinearFp8,  # noqa E402
         TorchaoQuantLinearInt8,  # noqa E402
         VllmQuantLinearInt8,  # noqa E402
    +    VllmQuantLinearFp8,  # noqa E402
     )
     from lightx2v_platform.ops.mm.cambricon_mlu.q_linear import MluQuantLinearInt8  # noqa E402
     from lightx2v.models.input_encoders.hf.wan.t5.tokenizer import HuggingfaceTokenizer  # noqa E402

    @@ -195,6 +196,8 @@ class T5Attention(nn.Module):
                 linear_cls = VllmQuantLinearInt8
             elif quant_scheme in ["fp8", "fp8-sgl"]:
                 linear_cls = SglQuantLinearFp8
    +        elif quant_scheme == "fp8-vllm":
    +            linear_cls = VllmQuantLinearFp8
             elif quant_scheme == "int8-torchao":
                 linear_cls = TorchaoQuantLinearInt8
             elif quant_scheme == "int8-q8f":

    @@ -268,6 +271,8 @@ class T5FeedForward(nn.Module):
                 linear_cls = VllmQuantLinearInt8
             elif quant_scheme in ["fp8", "fp8-sgl"]:
                 linear_cls = SglQuantLinearFp8
    +        elif quant_scheme == "fp8-vllm":
    +            linear_cls = VllmQuantLinearFp8
             elif quant_scheme == "int8-torchao":
                 linear_cls = TorchaoQuantLinearInt8
             elif quant_scheme == "int8-q8f":
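The same scheme-to-class chain now appears in four classes across two files. A condensed sketch of that dispatch as a table-driven helper; the helper name, the "int8-vllm" key (the condition guarding the VllmQuantLinearInt8 branch falls outside the visible hunks), and the error message are assumptions, not repository code:

    # Hypothetical table-driven equivalent of the inline elif chains.
    QUANT_LINEAR_BY_SCHEME = {
        "int8-vllm": VllmQuantLinearInt8,  # assumed key; condition not shown in the diff
        "fp8": SglQuantLinearFp8,
        "fp8-sgl": SglQuantLinearFp8,
        "fp8-vllm": VllmQuantLinearFp8,    # the branch this commit adds
        "int8-torchao": TorchaoQuantLinearInt8,
    }

    def select_quant_linear(quant_scheme):
        try:
            return QUANT_LINEAR_BY_SCHEME[quant_scheme]
        except KeyError:
            raise ValueError(f"unsupported quant_scheme: {quant_scheme!r}")

A dict keeps the mapping in one place, so adding a scheme like "fp8-vllm" becomes a one-line change instead of two lines per class.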
...
lightx2v/models/input_encoders/hf/wan/xlm_roberta/model.py
View file @
b161b913
...
...
@@ -10,7 +10,7 @@ from loguru import logger
# from lightx2v.attentions import attention
from
lightx2v.common.ops.attn
import
TorchSDPAWeight
from
lightx2v.models.input_encoders.hf.q_linear
import
Q8FQuantLinearFp8
,
Q8FQuantLinearInt8
,
SglQuantLinearFp8
,
TorchaoQuantLinearInt8
,
VllmQuantLinearInt8
from
lightx2v.models.input_encoders.hf.q_linear
import
Q8FQuantLinearFp8
,
Q8FQuantLinearInt8
,
SglQuantLinearFp8
,
TorchaoQuantLinearInt8
,
VllmQuantLinearFp8
,
VllmQuantLinearInt8
from
lightx2v.utils.utils
import
load_weights
from
lightx2v_platform.base.global_var
import
AI_DEVICE
from
lightx2v_platform.ops.mm.cambricon_mlu.q_linear
import
MluQuantLinearInt8
...
...
@@ -65,6 +65,8 @@ class SelfAttention(nn.Module):
linear_cls
=
VllmQuantLinearInt8
elif
quant_scheme
in
[
"fp8"
,
"fp8-sgl"
]:
linear_cls
=
SglQuantLinearFp8
elif
quant_scheme
==
"fp8-vllm"
:
linear_cls
=
VllmQuantLinearFp8
elif
quant_scheme
==
"int8-torchao"
:
linear_cls
=
TorchaoQuantLinearInt8
elif
quant_scheme
==
"int8-q8f"
:
...
...
@@ -147,6 +149,8 @@ class AttentionBlock(nn.Module):
linear_cls
=
VllmQuantLinearInt8
elif
quant_scheme
in
[
"fp8"
,
"fp8-sgl"
]:
linear_cls
=
SglQuantLinearFp8
elif
quant_scheme
==
"fp8-vllm"
:
linear_cls
=
VllmQuantLinearFp8
elif
quant_scheme
==
"int8-torchao"
:
linear_cls
=
TorchaoQuantLinearInt8
elif
quant_scheme
==
"int8-q8f"
:
...
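Since the commit title is "fix import error", the cheapest regression guard is a smoke test that simply imports each touched module. A sketch assuming pytest; this test file does not exist in the repository:

    # Hypothetical import smoke test for the modules touched by this commit.
    import importlib

    import pytest

    @pytest.mark.parametrize(
        "module",
        [
            "lightx2v.models.input_encoders.hf.wan.t5.model",
            "lightx2v.models.input_encoders.hf.wan.xlm_roberta.model",
            "lightx2v.models.input_encoders.hf.qwen25.qwen25_vlforconditionalgeneration",
            "lightx2v.models.networks.wan.infer.matrix_game2.transformer_infer",
        ],
    )
    def test_module_imports(module):
        # Fails with the original ImportError if any symbol goes missing again.
        importlib.import_module(module)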
...
lightx2v/models/networks/wan/infer/matrix_game2/transformer_infer.py
100644 → 100755
View file @
b161b913
...
...
@@ -8,9 +8,13 @@ try:
FLASH_ATTN_3_AVAILABLE
=
True
except
ImportError
:
from
flash_attn
import
flash_attn_func
try
:
from
flash_attn
import
flash_attn_func
FLASH_ATTN_3_AVAILABLE
=
False
FLASH_ATTN_3_AVAILABLE
=
False
except
ImportError
:
FLASH_ATTN_3_AVAILABLE
=
False
from
lightx2v.models.networks.wan.infer.matrix_game2.posemb_layers
import
apply_rotary_emb
,
get_nd_rotary_pos_embed
...
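After this change the module imports with flash-attn 3, flash-attn 2, or neither installed, but in the last case flash_attn_func is left unbound, so callers must not assume it exists. A call-time gate sketched under that assumption (the wrapper name and fallback are illustrative; flash-attn 2's flash_attn_func signature is assumed, and flash-attn 3's interface may differ):

    # Hypothetical gate over the import block above. flash_attn_func is only
    # bound when one of the imports succeeded, so probe the namespace first.
    import torch
    import torch.nn.functional as F

    def sdpa_or_flash(q, k, v):
        # Inputs in the flash-attn layout: (batch, seqlen, heads, head_dim).
        if "flash_attn_func" in globals():
            return flash_attn_func(q, k, v)
        # Pure-PyTorch fallback: SDPA expects (batch, heads, seqlen, head_dim).
        qt, kt, vt = (t.transpose(1, 2) for t in (q, k, v))
        return F.scaled_dot_product_attention(qt, kt, vt).transpose(1, 2)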
...
lightx2v/pipeline.py
View file @
b161b913
...
...
@@ -113,12 +113,14 @@ class LightX2VPipeline:
boundary_step_index
=
2
,
denoising_step_list
=
[
1000
,
750
,
500
,
250
],
config_json
=
None
,
rope_type
=
"torch"
,
):
if
config_json
is
not
None
:
self
.
set_infer_config_json
(
config_json
)
else
:
self
.
set_infer_config
(
attn_mode
,
rope_type
,
infer_steps
,
num_frames
,
height
,
...
...
@@ -142,6 +144,7 @@ class LightX2VPipeline:
def
set_infer_config
(
self
,
attn_mode
,
rope_type
,
infer_steps
,
num_frames
,
height
,
...
...
@@ -164,7 +167,7 @@ class LightX2VPipeline:
self
.
enable_cfg
=
False
else
:
self
.
enable_cfg
=
True
self
.
rope_type
=
rope_type
self
.
fps
=
fps
self
.
aspect_ratio
=
aspect_ratio
self
.
boundary
=
boundary
...
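The precedence here is worth spelling out: when config_json is supplied, the pipeline takes the set_infer_config_json path and the individual keyword arguments, including the new rope_type, are not used. A standalone reproduction of that rule (illustrative, not the repository's code):

    # Minimal sketch of the config-precedence logic in the diff above.
    def configure(config_json=None, rope_type="torch", **kwargs):
        if config_json is not None:
            return {"source": "json", "path": config_json}  # JSON config wins
        return {"source": "kwargs", "rope_type": rope_type, **kwargs}

    assert configure(config_json="infer.json")["source"] == "json"
    assert configure()["rope_type"] == "torch"              # default preserved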
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment