xuwx1 / LightX2V · Commits · 0379ae88
"awq/vscode:/vscode.git/clone" did not exist on "11efba09bfb520152fee4e3404b386b52f50d59d"
Unverified commit 0379ae88, authored Oct 13, 2025 by PengGao, committed by GitHub on Oct 13, 2025

fix server use new config system (#362)
Parent: 544435d1

Changes: 5 changed files with 67 additions and 10 deletions (+67 -10)
lightx2v/server/api.py        +7  -4
lightx2v/server/schema.py     +8  -0
lightx2v/server/service.py    +50 -4
lightx2v/utils/input_info.py  +2  -0
lightx2v/utils/set_config.py  +0  -2
lightx2v/server/api.py

@@ -40,11 +40,14 @@ class ApiServer:
         self._setup_routes()
 
     def _setup_routes(self):
+        @self.app.get("/")
+        def redirect_to_docs():
+            return RedirectResponse(url="/docs")
         self._setup_task_routes()
         self._setup_file_routes()
         self._setup_service_routes()
-
+        # Register routers
         self.app.include_router(self.tasks_router)
         self.app.include_router(self.files_router)
         self.app.include_router(self.service_router)
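Not part of the commit: a minimal standalone sketch of the same root-redirect pattern, using a bare FastAPI app rather than the project's ApiServer, to show the behaviour the new route introduces.

# Hypothetical standalone example (not the project's ApiServer) showing
# the redirect-to-docs pattern added above.
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastapi.testclient import TestClient

app = FastAPI()


@app.get("/")
def redirect_to_docs():
    return RedirectResponse(url="/docs")


client = TestClient(app)
resp = client.get("/", follow_redirects=False)
print(resp.status_code, resp.headers["location"])  # 307 /docs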
@@ -133,7 +136,7 @@ class ApiServer:
             infer_steps: int = Form(default=5),
             target_video_length: int = Form(default=81),
             seed: int = Form(default=42),
-            audio_file: Optional[UploadFile] = File(default=None),
+            audio_file: UploadFile = File(None),
             video_duration: int = Form(default=5),
         ):
             assert self.file_service is not None, "File service is not initialized"
@@ -305,7 +308,7 @@ class ApiServer:
         if not parsed_url.scheme or not parsed_url.netloc:
             return False
 
-        timeout = httpx.Timeout(connect=5.0, read=5.0)
+        timeout = httpx.Timeout(connect=5.0, read=5.0, write=5.0, pool=5.0)
         async with httpx.AsyncClient(verify=False, timeout=timeout) as client:
             response = await client.head(image_url, follow_redirects=True)
             return response.status_code < 400
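A side note on the timeout change (illustrative, not from the diff): recent httpx versions expect either a single default timeout or all four phases (connect, read, write, pool) to be set explicitly, which the new line satisfies. A small sketch with a placeholder URL:

# Sketch (assumed URL) of the fully-specified timeout used above.
import asyncio
import httpx


async def head_ok(url: str) -> bool:
    timeout = httpx.Timeout(connect=5.0, read=5.0, write=5.0, pool=5.0)
    async with httpx.AsyncClient(verify=False, timeout=timeout) as client:
        response = await client.head(url, follow_redirects=True)
        return response.status_code < 400


# Example usage with a placeholder URL:
print(asyncio.run(head_ok("https://example.com/image.png")))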
@@ -375,7 +378,7 @@ class ApiServer:
                 logger.error(f"Task {task_id} generation failed")
         except Exception as e:
-            logger.error(f"Task {task_id} processing failed: {str(e)}")
+            logger.exception(f"Task {task_id} processing failed: {str(e)}")
            task_manager.fail_task(task_id, str(e))
         finally:
             if lock_acquired:
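For reference (illustrative, not from the commit): loguru's logger.exception() logs at ERROR level but also attaches the traceback of the exception currently being handled, which logger.error() does not.

# Minimal loguru sketch: logger.exception records the traceback of the
# currently handled exception, logger.error records only the message.
from loguru import logger

try:
    1 / 0
except Exception as e:
    logger.error(f"processing failed: {str(e)}")      # message only
    logger.exception(f"processing failed: {str(e)}")  # message + traceback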
lightx2v/server/schema.py

+from typing import Optional
+
 from pydantic import BaseModel, Field
 
 from ..utils.generate_task_id import generate_task_id
 
 
+class TalkObject(BaseModel):
+    audio: str = Field(..., description="Audio path")
+    mask: str = Field(..., description="Mask path")
+
+
 class TaskRequest(BaseModel):
     task_id: str = Field(default_factory=generate_task_id, description="Task ID (auto-generated)")
     prompt: str = Field("", description="Generation prompt")
@@ -16,6 +23,7 @@ class TaskRequest(BaseModel):
     seed: int = Field(42, description="Random seed")
     audio_path: str = Field("", description="Input audio path (Wan-Audio)")
     video_duration: int = Field(5, description="Video duration (Wan-Audio)")
+    talk_objects: Optional[list[TalkObject]] = Field(None, description="Talk objects (Wan-Audio)")
 
     def __init__(self, **data):
         super().__init__(**data)
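Illustrative only (not part of the commit): how the new TalkObject entries might be attached to a TaskRequest. This assumes the fields shown in the diff are the only required ones and that anything omitted here has a default.

# Hypothetical usage sketch of the new schema fields; field names follow
# the diff above, everything else is assumed.
from lightx2v.server.schema import TalkObject, TaskRequest

req = TaskRequest(
    prompt="two people talking",
    talk_objects=[
        TalkObject(audio="inputs/speaker1.wav", mask="inputs/speaker1_mask.png"),
        TalkObject(audio="inputs/speaker2.wav", mask="inputs/speaker2_mask.png"),
    ],
)
print(req.task_id)        # auto-generated via default_factory
print(req.talk_objects)   # validated list[TalkObject], or None when omitted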
lightx2v/server/service.py

@@ -8,9 +8,11 @@ from urllib.parse import urlparse
 import httpx
 import torch
+from easydict import EasyDict
 from loguru import logger
 
 from ..infer import init_runner
+from ..utils.input_info import set_input_info
 from ..utils.set_config import set_config
 from .audio_utils import is_base64_audio, save_base64_audio
 from .distributed_utils import DistributedManager
@@ -245,8 +247,21 @@ class TorchrunInferenceWorker:
             # Run inference directly - torchrun handles the parallelization
             # Using asyncio.to_thread would be risky with NCCL operations
             # Instead, we rely on FastAPI's async handling and queue management
-            self.runner.set_inputs(task_data)
-            self.runner.run_pipeline()
+            task_data["task"] = self.runner.config["task"]
+            task_data["return_result_tensor"] = False
+            task_data["negative_prompt"] = task_data.get("negative_prompt", "")
+
+            # must be convert
+            task_data = EasyDict(task_data)
+            input_info = set_input_info(task_data)
+
+            # update lock config
+            self.runner.set_config(task_data)
+
+            # print("input_info==>", input_info)
+            self.runner.run_pipeline(input_info)
 
             # Small yield to allow other async operations if needed
             await asyncio.sleep(0)
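A rough sketch (not project code) of why the plain task dict is wrapped in EasyDict before being handed to set_input_info and set_config: those helpers read fields as attributes (for example args.task) and introspect them via vars(args), which a plain dict does not support.

# Minimal EasyDict sketch (not the project's task_data): the helpers
# called above access fields as attributes, which a plain dict lacks.
from easydict import EasyDict

task_data = {"task": "i2v", "negative_prompt": "", "return_result_tensor": False}
task_data = EasyDict(task_data)

print(task_data.task)             # attribute access now works: "i2v"
print(task_data["task"])          # dict-style access still works
print("task" in vars(task_data))  # True: vars(args)-style introspection also works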
@@ -267,7 +282,7 @@ class TorchrunInferenceWorker:
                 return None
 
         except Exception as e:
-            logger.error(f"Rank {self.rank} inference failed: {str(e)}")
+            logger.exception(f"Rank {self.rank} inference failed: {str(e)}")
             if self.world_size > 1:
                 self.dist_manager.barrier()
@@ -418,6 +433,37 @@ class VideoGenerationService:
                 logger.info(f"Task {message.task_id} audio path: {task_data['audio_path']}")
 
+            if "talk_objects" in message.model_fields_set and message.talk_objects:
+                task_data["talk_objects"] = [{} for _ in range(len(message.talk_objects))]
+                for index, talk_object in enumerate(message.talk_objects):
+                    if talk_object.audio.startswith("http"):
+                        audio_path = await self.file_service.download_audio(talk_object.audio)
+                        task_data["talk_objects"][index]["audio"] = str(audio_path)
+                    elif is_base64_audio(talk_object.audio):
+                        audio_path = save_base64_audio(talk_object.audio, str(self.file_service.input_audio_dir))
+                        task_data["talk_objects"][index]["audio"] = str(audio_path)
+                    else:
+                        task_data["talk_objects"][index]["audio"] = talk_object.audio
+
+                    if talk_object.mask.startswith("http"):
+                        mask_path = await self.file_service.download_image(talk_object.mask)
+                        task_data["talk_objects"][index]["mask"] = str(mask_path)
+                    elif is_base64_image(talk_object.mask):
+                        mask_path = save_base64_image(talk_object.mask, str(self.file_service.input_image_dir))
+                        task_data["talk_objects"][index]["mask"] = str(mask_path)
+                    else:
+                        task_data["talk_objects"][index]["mask"] = talk_object.mask
+
+                # FIXME(xxx): store this as a config.json, then assign the path of that config.json to task_data["audio_path"]
+                temp_path = self.file_service.cache_dir / uuid.uuid4().hex[:8]
+                temp_path.mkdir(parents=True, exist_ok=True)
+                task_data["audio_path"] = str(temp_path)
+                config_path = temp_path / "config.json"
+                with open(config_path, "w") as f:
+                    json.dump({"talk_objects": task_data["talk_objects"]}, f)
+
             actual_save_path = self.file_service.get_output_path(message.save_result_path)
             task_data["save_result_path"] = str(actual_save_path)
             task_data["video_path"] = message.save_result_path
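Not from the commit: a small sketch of the temp-directory handoff the block above performs, i.e. writing the resolved talk_objects into <cache_dir>/<8-char id>/config.json and passing that directory along via audio_path. All paths here are placeholders.

# Hypothetical reproduction of the config.json handoff; cache_dir and the
# talk_objects contents are placeholders.
import json
import uuid
from pathlib import Path

cache_dir = Path("/tmp/lightx2v_cache")  # assumed cache location
talk_objects = [{"audio": "/tmp/a.wav", "mask": "/tmp/m.png"}]

temp_path = cache_dir / uuid.uuid4().hex[:8]
temp_path.mkdir(parents=True, exist_ok=True)
audio_path = str(temp_path)  # the directory, not a file, travels in audio_path

with open(temp_path / "config.json", "w") as f:
    json.dump({"talk_objects": talk_objects}, f)

# A downstream reader recovers the same structure from the directory:
with open(Path(audio_path) / "config.json") as f:
    assert json.load(f)["talk_objects"] == talk_objects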
@@ -441,5 +487,5 @@ class VideoGenerationService:
                 raise RuntimeError(error_msg)
 
         except Exception as e:
-            logger.error(f"Task {message.task_id} processing failed: {str(e)}")
+            logger.exception(f"Task {message.task_id} processing failed: {str(e)}")
             raise
lightx2v/utils/input_info.py

@@ -163,6 +163,8 @@ def set_input_info(args):
         )
     else:
         raise ValueError(f"Unsupported task: {args.task}")
+
+    assert not (input_info.save_result_path and input_info.return_result_tensor), "save_result_path and return_result_tensor cannot be set at the same time"
     return input_info
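For illustration only (stand-in object, not the real input_info): the assertion added here, and removed from set_config below, rejects inputs that set both save_result_path and return_result_tensor.

# Stand-in sketch of the relocated guard; SimpleNamespace mimics an
# input_info object with just the two relevant fields.
from types import SimpleNamespace

info = SimpleNamespace(save_result_path="out.mp4", return_result_tensor=True)
try:
    assert not (info.save_result_path and info.return_result_tensor), \
        "save_result_path and return_result_tensor cannot be set at the same time"
except AssertionError as err:
    print(err)  # both set -> rejected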
lightx2v/utils/set_config.py

@@ -34,8 +34,6 @@ def get_default_config():
 def set_config(args):
-    assert not (args.save_result_path and args.return_result_tensor), "save_result_path and return_result_tensor cannot be set at the same time"
-
     config = get_default_config()
     config.update({k: v for k, v in vars(args).items() if k not in ALL_INPUT_INFO_KEYS})