xuwx1 / LightX2V · Commits · aec90a0d

Commit aec90a0d, authored Apr 28, 2025 by helloyongyang
Parent: c98d486d

    update some variable names

Showing 8 changed files with 97 additions and 97 deletions (+97, -97).
Changed files:

  lightx2v/api_server.py                                           +1  -1
  lightx2v/common/ops/mm/mm_weight.py                              +6  -6
  lightx2v/models/networks/hunyuan/weights/post_weights.py        +10 -10
  lightx2v/models/networks/hunyuan/weights/pre_weights.py         +10 -10
  lightx2v/models/networks/hunyuan/weights/transformer_weights.py +32 -32
  lightx2v/models/networks/wan/weights/post_weights.py            +11 -11
  lightx2v/models/networks/wan/weights/pre_weights.py             +11 -11
  lightx2v/models/networks/wan/weights/transformer_weights.py     +16 -16
lightx2v/api_server.py

@@ -59,7 +59,7 @@ class Message(BaseModel):
 @app.post("/v1/local/video/generate")
-async def v1_local_video_generate(message: Message, request: Request):
+async def v1_local_video_generate(message: Message):
     global runner
     runner.set_inputs(message)
     await asyncio.to_thread(runner.run_pipeline)
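The handler binds the JSON body to the Message model and runs the blocking pipeline in a worker thread via asyncio.to_thread, so the unused request: Request parameter can be dropped without changing the route. A minimal, self-contained sketch of the same pattern follows; the Message fields and the run_pipeline stand-in are illustrative assumptions, not the project's actual definitions.

```python
import asyncio
import time

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class Message(BaseModel):
    # Illustrative fields only; the real Message model lives in lightx2v/api_server.py.
    prompt: str
    save_video_path: str = "output.mp4"


def run_pipeline(message: Message) -> None:
    # Stand-in for the blocking video-generation pipeline.
    time.sleep(1)


@app.post("/v1/local/video/generate")
async def v1_local_video_generate(message: Message):
    # Offload the blocking call so the event loop stays responsive.
    await asyncio.to_thread(run_pipeline, message)
    return {"status": "ok", "save_video_path": message.save_video_path}

# Run with, for example: uvicorn sketch:app --port 8000
```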
lightx2v/common/ops/mm/mm_weight.py

@@ -87,9 +87,9 @@ class MMWeightQuantTemplate(MMWeightTemplate):
         self.weight_need_transpose = True
         self.act_quant_func = None

-    """
-    weight load functions
-    """
+    # =========================
+    # weight load functions
+    # =========================
     def load(self, weight_dict):
         self.load_func(weight_dict)

@@ -140,9 +140,9 @@ class MMWeightQuantTemplate(MMWeightTemplate):
         x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn)
         return x_scaled.view_as(x_padded)[:m, :n].contiguous(), (x_amax / 448.0).view(x_view.size(0), x_view.size(2))

-    """
-    act quant kernels
-    """
+    # =========================
+    # act quant kernels
+    # =========================
     def act_quant_fp8_perchannel_sym_vllm(self, x):
         input_tensor_quant, input_tensor_scale = ops.scaled_fp8_quant(x, None, scale_ub=None, use_per_token_if_dynamic=True)
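The quantization helpers around this hunk scale activations so that the per-group absolute maximum maps to 448.0, the largest finite value of torch.float8_e4m3fn, and keep x_amax / 448.0 as the dequantization scale. A minimal per-token sketch of that idea in plain PyTorch is shown below; the function name, shapes, and grouping are illustrative assumptions, not the repository's exact kernel.

```python
import torch


def quant_fp8_per_token_sym(x: torch.Tensor):
    """Symmetric per-token FP8 (e4m3) quantization sketch.

    Returns the quantized tensor and a per-row scale such that
    x_q.float() * scale approximately reconstructs x.
    """
    FP8_MAX = 448.0  # largest finite value representable by float8_e4m3fn
    x = x.float()
    amax = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-12)
    x_q = (x * (FP8_MAX / amax)).to(torch.float8_e4m3fn)
    scale = amax / FP8_MAX
    return x_q, scale


x = torch.randn(4, 16)
x_q, scale = quant_fp8_per_token_sym(x)
print((x_q.float() * scale - x).abs().max())  # small quantization error
```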
lightx2v/models/networks/hunyuan/weights/post_weights.py

@@ -15,17 +15,17 @@ class HunyuanPostWeights:
             self.final_layer_adaLN_modulation_1,
         ]
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate):
+                weight.to_cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate):
+                weight.to_cuda()
lightx2v/models/networks/hunyuan/weights/pre_weights.py

@@ -97,17 +97,17 @@ class HunyuanPreWeights:
             self.guidance_in_mlp_2,
         ]
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate) or isinstance(mm_weight, LNWeightTemplate) or isinstance(mm_weight, Conv3dWeightTemplate):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate) or isinstance(weight, LNWeightTemplate) or isinstance(weight, Conv3dWeightTemplate):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate) or isinstance(mm_weight, LNWeightTemplate) or isinstance(mm_weight, Conv3dWeightTemplate):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate) or isinstance(weight, LNWeightTemplate) or isinstance(weight, Conv3dWeightTemplate):
+                weight.to_cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate) or isinstance(mm_weight, LNWeightTemplate) or isinstance(mm_weight, Conv3dWeightTemplate):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate) or isinstance(weight, LNWeightTemplate) or isinstance(weight, Conv3dWeightTemplate):
+                weight.to_cuda()
lightx2v/models/networks/hunyuan/weights/transformer_weights.py

@@ -79,30 +79,30 @@ class HunyuanTransformerDoubleBlock:
             self.txt_mlp_fc2,
         ]
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cuda()

     def to_cpu_sync(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cpu(non_blocking=True)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cpu(non_blocking=True)

     def to_cuda_sync(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cuda(non_blocking=True)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cuda(non_blocking=True)

 class HunyuanTransformerSingleBlock:

@@ -131,27 +131,27 @@ class HunyuanTransformerSingleBlock:
             self.modulation,
         ]
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cuda()

     def to_cpu_sync(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cpu(non_blocking=True)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cpu(non_blocking=True)

     def to_cuda_sync(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cuda(non_blocking=True)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, RMSWeightTemplate)):
+                weight.to_cuda(non_blocking=True)
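Both block classes follow the same container pattern: every tensor wrapper goes into a flat weight_list, and to_cpu/to_cuda (plus the _sync variants with non_blocking=True) loop over that list and dispatch only to the template types, which is what makes per-block CPU offloading possible. Below is a minimal sketch of that pattern under assumed class names; SimpleWeight and SimpleBlock are illustrative stand-ins, not the repository's MMWeightTemplate/RMSWeightTemplate.

```python
import torch


class SimpleWeight:
    """Minimal stand-in for the repo's weight-template wrappers (illustrative only)."""

    def __init__(self, tensor: torch.Tensor):
        self.tensor = tensor

    def to_cpu(self, non_blocking: bool = False):
        self.tensor = self.tensor.to("cpu", non_blocking=non_blocking)

    def to_cuda(self, non_blocking: bool = False):
        self.tensor = self.tensor.to("cuda", non_blocking=non_blocking)


class SimpleBlock:
    """Mirrors the container pattern: one flat list, type-checked dispatch."""

    def __init__(self, weights):
        self.weight_list = list(weights)

    def to_cpu(self):
        for weight in self.weight_list:
            if isinstance(weight, SimpleWeight):
                weight.to_cpu()

    def to_cuda(self):
        for weight in self.weight_list:
            if isinstance(weight, SimpleWeight):
                weight.to_cuda()


if torch.cuda.is_available():
    block = SimpleBlock([SimpleWeight(torch.randn(4, 4))])
    block.to_cuda()  # move wrapped tensors onto the GPU before running the block
    block.to_cpu()   # offload them again afterwards to free GPU memory
```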
lightx2v/models/networks/wan/weights/post_weights.py

@@ -12,22 +12,22 @@ class WanPostWeights:
         self.weight_list = [self.head]
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)
                 if self.config["cpu_offload"]:
-                    mm_weight.to_cpu()
+                    weight.to_cpu()
                     self.head_modulation = self.head_modulation.cpu()

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate):
+                weight.to_cpu()
         self.head_modulation = self.head_modulation.cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, MMWeightTemplate):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, MMWeightTemplate):
+                weight.to_cuda()
         self.head_modulation = self.head_modulation.cuda()
lightx2v/models/networks/wan/weights/pre_weights.py

@@ -40,19 +40,19 @@ class WanPreWeights:
             self.weight_list.append(self.proj_3)
             self.weight_list.append(self.proj_4)
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, Conv3dWeightTemplate)):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, Conv3dWeightTemplate)):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)
                 if self.config["cpu_offload"]:
-                    mm_weight.to_cpu()
+                    weight.to_cpu()

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, Conv3dWeightTemplate)):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, Conv3dWeightTemplate)):
+                weight.to_cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, Conv3dWeightTemplate)):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, Conv3dWeightTemplate)):
+                weight.to_cuda()
lightx2v/models/networks/wan/weights/transformer_weights.py

@@ -81,31 +81,31 @@ class WanTransformerAttentionBlock:
             self.weight_list.append(self.cross_attn_v_img)
             self.weight_list.append(self.cross_attn_norm_k_img)
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
-                mm_weight.set_config(self.config["mm_config"])
-                mm_weight.load(weight_dict)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
+                weight.set_config(self.config["mm_config"])
+                weight.load(weight_dict)

     def to_cpu(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cpu()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
+                weight.to_cpu()
         self.modulation = self.modulation.cpu()

     def to_cuda(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cuda()
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
+                weight.to_cuda()
         self.modulation = self.modulation.cuda()

     def to_cpu_sync(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cpu(non_blocking=True)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
+                weight.to_cpu(non_blocking=True)
         self.modulation = self.modulation.to("cpu", non_blocking=True)

     def to_cuda_sync(self):
-        for mm_weight in self.weight_list:
-            if isinstance(mm_weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
-                mm_weight.to_cuda(non_blocking=True)
+        for weight in self.weight_list:
+            if isinstance(weight, (MMWeightTemplate, LNWeightTemplate, RMSWeightTemplate)):
+                weight.to_cuda(non_blocking=True)
         self.modulation = self.modulation.cuda(non_blocking=True)
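The to_cpu_sync/to_cuda_sync variants above pass non_blocking=True so the copies can overlap with other work. As a hedged aside, PyTorch only performs a host-to-device copy asynchronously when the source tensor sits in pinned (page-locked) memory; the short sketch below illustrates that, and its tensor shape and stream usage are assumptions for demonstration, not code from this commit.

```python
import torch

# Illustrative only: non_blocking transfers run asynchronously only when the
# host tensor lives in pinned (page-locked) memory.
if torch.cuda.is_available():
    modulation = torch.randn(1, 6, 1536).pin_memory()  # pinned host buffer (shape assumed)
    stream = torch.cuda.Stream()
    with torch.cuda.stream(stream):
        modulation_gpu = modulation.to("cuda", non_blocking=True)  # async H2D copy
    stream.synchronize()  # wait for the copy before using the tensor
    print(modulation_gpu.device)
```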