LightX2V, commit dc472eb5
Authored Apr 29, 2025 by gushiqiao; committed by GitHub on Apr 29, 2025.

Merge pull request #27 from ModelTC/dev_convert

Support convert weight to diffusers.

Parents: aec90a0d, 6347b21b
Showing 3 changed files with 18 additions and 15 deletions (+18 -15):

lightx2v/utils/quant_utils.py   +13 -12
lightx2v/utils/utils.py          +2  -1
scripts/post.py                  +3  -2
lightx2v/utils/quant_utils.py (view file @ dc472eb5)

 import torch
 from qtorch.quant import float_quantize
+from loguru import logger


 class BaseQuantizer(object):
...
@@ -168,23 +169,23 @@ if __name__ == "__main__":
     weight = torch.randn(4096, 4096, dtype=torch.bfloat16).cuda()
     quantizer = IntegerQuantizer(4, False, "per_group", group_size=128)
     q_weight = quantizer.fake_quant_tensor(weight)
-    print(weight)
-    print(q_weight)
-    print(f"cosine = {torch.cosine_similarity(weight.view(1, -1).to(torch.float64), q_weight.view(1, -1).to(torch.float64))}")
+    logger.info(weight)
+    logger.info(q_weight)
+    logger.info(f"cosine = {torch.cosine_similarity(weight.view(1, -1).to(torch.float64), q_weight.view(1, -1).to(torch.float64))}")
     realq_weight, scales, zeros = quantizer.real_quant_tensor(weight)
-    print(f"realq_weight = {realq_weight}, {realq_weight.shape}")
-    print(f"scales = {scales}, {scales.shape}")
-    print(f"zeros = {zeros}, {zeros.shape}")
+    logger.info(f"realq_weight = {realq_weight}, {realq_weight.shape}")
+    logger.info(f"scales = {scales}, {scales.shape}")
+    logger.info(f"zeros = {zeros}, {zeros.shape}")

     weight = torch.randn(8192, 4096, dtype=torch.bfloat16).cuda()
     quantizer = FloatQuantizer("e4m3", True, "per_channel")
     q_weight = quantizer.fake_quant_tensor(weight)
-    print(weight)
-    print(q_weight)
-    print(f"cosine = {torch.cosine_similarity(weight.view(1, -1).to(torch.float64), q_weight.view(1, -1).to(torch.float64))}")
+    logger.info(weight)
+    logger.info(q_weight)
+    logger.info(f"cosine = {torch.cosine_similarity(weight.view(1, -1).to(torch.float64), q_weight.view(1, -1).to(torch.float64))}")
     realq_weight, scales, zeros = quantizer.real_quant_tensor(weight)
-    print(f"realq_weight = {realq_weight}, {realq_weight.shape}")
-    print(f"scales = {scales}, {scales.shape}")
-    print(f"zeros = {zeros}")
+    logger.info(f"realq_weight = {realq_weight}, {realq_weight.shape}")
+    logger.info(f"scales = {scales}, {scales.shape}")
+    logger.info(f"zeros = {zeros}")
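The hunk above only swaps print() for logger.info() around the quantizer self-test, but it may help to see what fake_quant_tensor and the cosine check are doing. The following is a minimal standalone sketch of per-group asymmetric integer fake quantization, not the repository's IntegerQuantizer; the helper name fake_quant_per_group and the smaller tensor size are illustrative assumptions.

import torch
from loguru import logger

def fake_quant_per_group(w: torch.Tensor, bits: int = 4, group_size: int = 128) -> torch.Tensor:
    # Quantize-then-dequantize each group of `group_size` values to `bits`-bit
    # unsigned integers with a per-group scale and zero point.
    # Assumes w.numel() is divisible by group_size.
    g = w.reshape(-1, group_size).float()
    qmax = 2 ** bits - 1
    lo = g.min(dim=1, keepdim=True).values
    hi = g.max(dim=1, keepdim=True).values
    scale = (hi - lo).clamp(min=1e-8) / qmax
    zero = torch.round(-lo / scale)
    q = torch.clamp(torch.round(g / scale) + zero, 0, qmax)
    return ((q - zero) * scale).reshape(w.shape).to(w.dtype)

weight = torch.randn(1024, 1024, dtype=torch.bfloat16)  # smaller than the script's 4096x4096, just to keep the demo light
q_weight = fake_quant_per_group(weight)
# Same sanity metric as the script: cosine similarity between the flattened
# original and fake-quantized tensors (values close to 1.0 mean little distortion).
cos = torch.cosine_similarity(weight.view(1, -1).to(torch.float64), q_weight.view(1, -1).to(torch.float64))
logger.info(f"cosine = {cos}")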
lightx2v/utils/utils.py (view file @ dc472eb5)

 import os
 from einops import rearrange
+from loguru import logger
 import torch
 import torchvision
...
@@ -81,5 +82,5 @@ def cache_video(
             error = e
             continue
     else:
-        print(f"cache_video failed, error: {error}", flush=True)
+        logger.info(f"cache_video failed, error: {error}", flush=True)
         return None
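For context, the changed line sits at the end of cache_video's retry loop, where the last exception is reported only after every attempt has failed. A minimal sketch of that retry-and-log pattern with loguru is below; save_with_retry and its arguments are illustrative names, not the repository's cache_video.

from loguru import logger

def save_with_retry(write_fn, retries=5):
    # Illustrative retry-and-log pattern: call write_fn up to `retries` times,
    # remember the most recent exception, and log it only if all attempts fail.
    error = None
    for _ in range(retries):
        try:
            return write_fn()  # success: return whatever the writer produced
        except Exception as e:
            error = e          # keep the latest failure and try again
            continue
    else:                      # the loop finished without returning, so every attempt failed
        logger.info(f"save failed, error: {error}")
        return None

# Example use: retry an arbitrary callable that may raise.
# result = save_with_retry(lambda: torchvision.io.write_video(path, frames, fps=16))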
scripts/post.py (view file @ dc472eb5)

 import requests
+from loguru import logger

 url = "http://localhost:8000/v1/local/video/generate"
...
@@ -10,8 +11,8 @@ message = {
"save_video_path"
:
"./output_lightx2v_wan_t2v_ap4.mp4"
,
# It is best to set it to an absolute path.
}
print
(
f
"message:
{
message
}
"
)
logger
.
info
(
f
"message:
{
message
}
"
)
response
=
requests
.
post
(
url
,
json
=
message
)
print
(
f
"response:
{
response
.
json
()
}
"
)
logger
.
info
(
f
"response:
{
response
.
json
()
}
"
)
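Because the script now logs through loguru instead of print, its output can also be captured to a file by adding a sink before the request is sent. This is an optional configuration sketch; the sink path post.log is just an example, not something the script creates or the commit configures.

from loguru import logger

# loguru logs to stderr by default; an extra sink mirrors the same records to a file.
logger.add("post.log", level="INFO", rotation="10 MB")

logger.info("message: {}", {"save_video_path": "./output_lightx2v_wan_t2v_ap4.mp4"})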