@echo off
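REM Switch the console code page to UTF-8 (65001) so emoji and other non-ASCII text display correctly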
chcp 65001 >nul
echo 🎬 LightX2V Gradio Windows Startup Script
echo ==========================================

REM ==================== Configuration Area ====================
REM ⚠️  Important: Please modify the following paths according to your actual environment

REM 🚨 Storage Performance Tips 🚨
REM 💾 Strongly recommend storing model files on an SSD!
REM 📈 An SSD significantly improves model loading speed and inference performance
REM 🐌 A mechanical hard drive (HDD) can make model loading slow and degrade the overall experience

REM LightX2V project root directory path
REM Example: D:\LightX2V
set lightx2v_path=/path/to/LightX2V

REM Model path configuration
REM Image-to-video model path (for i2v tasks)
REM Example: D:\models\Wan2.1-I2V-14B-480P-Lightx2v
set i2v_model_path=/path/to/Wan2.1-I2V-14B-480P-Lightx2v

REM Text-to-video model path (for t2v tasks)
REM Example: D:\models\Wan2.1-T2V-1.3B
set t2v_model_path=/path/to/Wan2.1-T2V-1.3B

REM Model size configuration
REM Default model size (options: 14b, 1.3b)
set model_size=14b

REM Model class configuration
REM Default model class (options: wan2.1, wan2.1_distill)
set model_cls=wan2.1

REM Server configuration
set server_name=127.0.0.1
set server_port=8032

REM GPU configuration
set gpu_id=0

REM ==================== Environment Variables Setup ====================
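REM Restrict CUDA to the selected GPU so PyTorch only sees that device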
set CUDA_VISIBLE_DEVICES=%gpu_id%
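REM Prepend the project root so Python can import the lightx2v package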
set PYTHONPATH=%lightx2v_path%;%PYTHONPATH%
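REM Project-specific flag; presumably enables LightX2V's profiling/debug logging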
set ENABLE_PROFILING_DEBUG=true
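REM Let PyTorch's CUDA caching allocator use expandable segments to reduce memory fragmentation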
set PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True

REM ==================== Parameter Parsing ====================
REM Default task type
set task=i2v
REM Default interface language
set lang=zh

REM Parse command line arguments
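REM Each option consumes two tokens (the flag and its value), so shift runs twice per match; the loop ends when %1 is empty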
:parse_args
if "%1"=="" goto :end_parse
if "%1"=="--task" (
    set task=%2
    shift
    shift
    goto :parse_args
)
if "%1"=="--lang" (
    set lang=%2
    shift
    shift
    goto :parse_args
)
if "%1"=="--port" (
    set server_port=%2
    shift
    shift
    goto :parse_args
)
if "%1"=="--gpu" (
    set gpu_id=%2
    set CUDA_VISIBLE_DEVICES=%gpu_id%
    shift
    shift
    goto :parse_args
)
if "%1"=="--model_size" (
    set model_size=%2
    shift
    shift
    goto :parse_args
)
if "%1"=="--model_cls" (
    set model_cls=%2
    shift
    shift
    goto :parse_args
)
if "%1"=="--help" (
    echo 🎬 LightX2V Gradio Windows Startup Script
    echo ==========================================
    echo Usage: %0 [options]
    echo.
    echo 📋 Available options:
    echo   --task i2v^|t2v    Task type (default: i2v)
    echo                      i2v: Image-to-video generation
    echo                      t2v: Text-to-video generation
    echo   --lang zh^|en      Interface language (default: zh)
    echo                      zh: Chinese interface
    echo                      en: English interface
    echo   --port PORT        Server port (default: 8032)
    echo   --gpu GPU_ID       GPU device ID (default: 0)
    echo   --model_size MODEL_SIZE
    echo                      Model size (default: 14b)
    echo                      14b: 14B parameter model
    echo                      1.3b: 1.3B parameter model
    echo   --model_cls MODEL_CLASS
    echo                      Model class (default: wan2.1)
    echo                      wan2.1: Standard model variant
    echo                      wan2.1_distill: Distilled model variant for faster inference
    echo   --help             Show this help message
    echo.
    echo 🚀 Usage examples:
    echo   %0                                    # Default startup for image-to-video mode
    echo   %0 --task i2v --lang zh --port 8032   # Start with specified parameters
    echo   %0 --task t2v --lang en --port 7860   # Text-to-video with English interface
    echo   %0 --task i2v --gpu 1 --port 8032     # Use GPU 1
    echo   %0 --task t2v --model_size 1.3b       # Use 1.3B model
    echo   %0 --task i2v --model_size 14b        # Use 14B model
    echo   %0 --task i2v --model_cls wan2.1_distill  # Use distilled model
    echo.
    echo 📝 Notes:
    echo   - Edit script to configure model paths before first use
    echo   - Ensure required Python dependencies are installed
    echo   - Recommended to use GPU with 8GB+ VRAM
    echo   - 🚨 Strongly recommend storing models on SSD for better performance
    pause
    exit /b 0
)
echo Unknown parameter: %1
echo Use --help to see help information
pause
exit /b 1

:end_parse

REM ==================== Parameter Validation ====================
if "%task%"=="i2v" goto :valid_task
if "%task%"=="t2v" goto :valid_task
echo Error: Task type must be 'i2v' or 't2v'
pause
exit /b 1

:valid_task
if "%lang%"=="zh" goto :valid_lang
if "%lang%"=="en" goto :valid_lang
echo Error: Language must be 'zh' or 'en'
pause
exit /b 1

:valid_lang
if "%model_size%"=="14b" goto :valid_size
if "%model_size%"=="1.3b" goto :valid_size
echo Error: Model size must be '14b' or '1.3b'
pause
exit /b 1

:valid_size
if "%model_cls%"=="wan2.1" goto :valid_cls
if "%model_cls%"=="wan2.1_distill" goto :valid_cls
echo Error: Model class must be 'wan2.1' or 'wan2.1_distill'
pause
exit /b 1

:valid_cls

REM Select model path based on task type
if "%task%"=="i2v" (
    set model_path=%i2v_model_path%
    echo 🎬 Starting Image-to-Video mode
) else (
    set model_path=%t2v_model_path%
    echo 🎬 Starting Text-to-Video mode
)

REM Check if model path exists
if not exist "%model_path%" (
    echo Error: Model path does not exist
    echo 📁 Path: %model_path%
    echo 🔧 Solutions:
    echo   1. Check model path configuration in script
    echo   2. Ensure model files are properly downloaded
    echo   3. Verify path permissions are correct
    echo   4. 💾 Recommend storing models on SSD for faster loading
    pause
    exit /b 1
)

REM Select demo file based on language
if "%lang%"=="zh" (
    set demo_file=gradio_demo_zh.py
    echo 🌏 Using Chinese interface
) else (
    set demo_file=gradio_demo.py
    echo 🌏 Using English interface
)

REM Check if demo file exists
if not exist "%demo_file%" (
    echo Error: Demo file does not exist
    echo 📄 File: %demo_file%
    echo 🔧 Solutions:
    echo   1. Ensure script is run in the correct directory
    echo   2. Check if file has been renamed or moved
    echo   3. Re-clone or download project files
    pause
    exit /b 1
)

REM ==================== System Information Display ====================
echo ==========================================
echo 🚀 LightX2V Gradio Starting...
echo ==========================================
echo 📁 Project path: %lightx2v_path%
echo 🤖 Model path: %model_path%
echo 🎯 Task type: %task%
echo 🤖 Model size: %model_size%
echo 🤖 Model class: %model_cls%
echo 🌏 Interface language: %lang%
echo 🖥️  GPU device: %gpu_id%
echo 🌐 Server address: %server_name%:%server_port%
echo ==========================================

REM Display system resource information
echo 💻 System resource information:
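REM wmic reports TotalVisibleMemorySize and FreePhysicalMemory in kilobytes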
wmic OS get TotalVisibleMemorySize,FreePhysicalMemory /format:table

REM Display GPU information
REM Probe silently first so the GPU info is printed only once, after its header
nvidia-smi --query-gpu=name,memory.total,memory.free --format=csv,noheader,nounits >nul 2>&1
if errorlevel 1 (
    echo 🎮 GPU information: Unable to get GPU info
) else (
    echo 🎮 GPU information:
    nvidia-smi --query-gpu=name,memory.total,memory.free --format=csv,noheader,nounits
)

REM ==================== Start Demo ====================
echo 🎬 Starting Gradio demo...
echo 📱 Please access in browser: http://%server_name%:%server_port%
echo ⏹️  Press Ctrl+C to stop service
echo 🔄 First startup may take several minutes to load resources...
echo ==========================================

REM Start Python demo
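REM The trailing ^ continues the command across multiple lines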
python %demo_file% ^
    --model_path "%model_path%" ^
    --model_cls %model_cls% ^
    --task %task% ^
    --server_name %server_name% ^
    --server_port %server_port% ^
    --model_size %model_size%

REM Display final system resource usage
echo.
echo ==========================================
echo 📊 Final system resource usage:
wmic OS get TotalVisibleMemorySize,FreePhysicalMemory /format:table

pause