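"""Command-line inference entry point for LightX2V.

Parses CLI arguments, builds the runtime config, optionally initializes
torch.distributed for parallel inference, and runs the selected model runner.

Example invocation (model/config paths and the prompt are placeholders):

    python infer.py --model_cls wan2.1 --task t2v \
        --model_path <path/to/model> --config_json <path/to/config.json> \
        --prompt "a corgi surfing at sunset"
"""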
import argparse

import torch
import torch.distributed as dist
from loguru import logger

from lightx2v.common.ops import *
from lightx2v.models.runners.cogvideox.cogvidex_runner import CogvideoxRunner  # noqa: F401
from lightx2v.models.runners.graph_runner import GraphRunner
from lightx2v.models.runners.hunyuan.hunyuan_runner import HunyuanRunner  # noqa: F401
from lightx2v.models.runners.qwen_image.qwen_image_runner import QwenImageRunner  # noqa: F401
from lightx2v.models.runners.wan.wan_audio_runner import Wan22AudioRunner, Wan22MoeAudioRunner, WanAudioRunner  # noqa: F401
from lightx2v.models.runners.wan.wan_causvid_runner import WanCausVidRunner  # noqa: F401
from lightx2v.models.runners.wan.wan_distill_runner import WanDistillRunner  # noqa: F401
from lightx2v.models.runners.wan.wan_runner import Wan22MoeRunner, WanRunner  # noqa: F401
from lightx2v.models.runners.wan.wan_skyreels_v2_df_runner import WanSkyreelsV2DFRunner  # noqa: F401
from lightx2v.utils.envs import *
from lightx2v.utils.profiler import ProfilingContext
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.set_config import print_config, set_config, set_parallel_config
from lightx2v.utils.utils import seed_all


def init_runner(config):
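    """Seed all RNGs and build the runner registered for ``config.model_cls``.

    If graph mode is enabled (``CHECK_ENABLE_GRAPH_MODE()``), the registered
    runner is wrapped in a ``GraphRunner``; otherwise it is returned directly.
    """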
    seed_all(config.seed)

    if CHECK_ENABLE_GRAPH_MODE():
        default_runner = RUNNER_REGISTER[config.model_cls](config)
        default_runner.init_modules()
        runner = GraphRunner(default_runner)
    else:
        runner = RUNNER_REGISTER[config.model_cls](config)
        runner.init_modules()
    return runner


def main():
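    """Parse CLI arguments, build the config, and run the inference pipeline."""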
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_cls",
        type=str,
        required=True,
        choices=[
            "wan2.1",
            "hunyuan",
            "wan2.1_distill",
            "wan2.1_causvid",
            "wan2.1_skyreels_v2_df",
            "cogvideox",
            "wan2.1_audio",
            "wan2.2_moe",
            "wan2.2",
            "wan2.2_moe_audio",
            "wan2.2_audio",
            "wan2.2_moe_distill",
            "qwen_image",
        ],
        default="wan2.1",
    )

    parser.add_argument("--task", type=str, choices=["t2v", "i2v", "t2i", "flf2v"], default="t2v")
    parser.add_argument("--model_path", type=str, required=True)
    parser.add_argument("--config_json", type=str, required=True)
    parser.add_argument("--use_prompt_enhancer", action="store_true")

    parser.add_argument("--prompt", type=str, default="", help="The input prompt for text-to-video generation")
    parser.add_argument("--negative_prompt", type=str, default="")

    parser.add_argument("--image_path", type=str, default="", help="The path to input image file for image-to-video (i2v) task")
    parser.add_argument("--last_frame_path", type=str, default="", help="The path to last frame file for first-last-frame-to-video (flf2v) task")
    parser.add_argument("--audio_path", type=str, default="", help="The path to input audio file for audio-to-video (a2v) task")

    parser.add_argument("--save_video_path", type=str, default="./output_lightx2v.mp4", help="The path to save video path/file")
    args = parser.parse_args()

    # set config
    config = set_config(args)

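    # When parallel inference is configured, initialize the NCCL process group
    # and pin this process to the GPU matching its rank.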
    if config.parallel:
        dist.init_process_group(backend="nccl")
        torch.cuda.set_device(dist.get_rank())
        set_parallel_config(config)

    print_config(config)

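    # Build the runner and run the generation pipeline inside the "Total Cost" profiling context.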
    with ProfilingContext("Total Cost"):
        runner = init_runner(config)
        runner.run_pipeline()

    # Clean up distributed process group
    if dist.is_initialized():
        dist.destroy_process_group()
        logger.info("Distributed process group cleaned up")


if __name__ == "__main__":
    main()