#!/bin/bash
# Run Wan2.1 image-to-video (i2v) inference through lightx2v with a LoRA adapter.
# Fill in the three paths below (or edit this script) before running.

# set path and first
lightx2v_path=""   # root of the lightx2v checkout (used for PYTHONPATH and the sample image)
model_path=""      # path to the Wan2.1 i2v model weights
lora_path=""       # path to the LoRA weights passed via --lora_path
# check section
# Fall back to GPU 0 when the caller did not pin a device; the expansion is
# nounset-safe so this also works under `set -u`.
if [ -z "${CUDA_VISIBLE_DEVICES:-}" ]; then
    cuda_devices=0
    echo "Warn: CUDA_VISIBLE_DEVICES is not set, using default value: ${cuda_devices}, change at shell script or set env variable." >&2
    export CUDA_VISIBLE_DEVICES=${cuda_devices}
fi

# model_path is mandatory: abort early with a clear diagnostic (on stderr)
# instead of letting the python invocation fail later with an obscure error.
if [ -z "${model_path}" ]; then
    echo "Error: model_path is not set. Please set this variable first." >&2
    exit 1
fi

# lora_path is mandatory for this LoRA inference script.
if [ -z "${lora_path}" ]; then
    echo "Error: lora_path is not set. Please set this variable first." >&2
    exit 1
fi

# lightx2v_path is also required: PYTHONPATH and the sample --image_path
# below both depend on it, so validate it like the other paths.
if [ -z "${lightx2v_path}" ]; then
    echo "Error: lightx2v_path is not set. Please set this variable first." >&2
    exit 1
fi

# Disable tokenizer parallelism (avoids the HuggingFace tokenizers
# fork-after-parallelism warning in the worker processes).
export TOKENIZERS_PARALLELISM=false

# Make the lightx2v package importable from the configured checkout.
# ${PYTHONPATH:-} keeps this safe when PYTHONPATH is unset (e.g. under set -u).
export PYTHONPATH=${lightx2v_path}:${PYTHONPATH:-}

# Enable lightx2v's profiling/debug output.
export ENABLE_PROFILING_DEBUG=true
# Launch Wan2.1 image-to-video generation with the LoRA applied.
# NOTE(review): --attention_type flash_attn3 presumably requires a
# FlashAttention-3-capable GPU/build — confirm for the target machine.
# All path expansions are quoted so paths containing spaces work.
python -m lightx2v \
    --model_cls wan2.1 \
    --task i2v \
    --model_path "${model_path}" \
    --prompt "画面中的物体轻轻向上跃起,变成了外貌相似的毛绒玩具。毛绒玩具有着一双眼睛,它的颜色和之前的一样。然后,它开始跳跃起来。背景保持一致,气氛显得格外俏皮。" \
    --negative_prompt "画面过曝,模糊,文字,字幕" \
    --infer_steps 40 \
    --target_video_length 81 \
    --target_width 832 \
    --target_height 480 \
    --attention_type flash_attn3 \
    --seed 42 \
    --save_video_path ./output_lightx2v_wan_i2v.mp4 \
    --sample_guide_scale 5 \
    --sample_shift 5 \
    --image_path "${lightx2v_path}/assets/inputs/imgs/img_0.jpg" \
    --lora_path "${lora_path}" \
    --feature_caching Tea \
    --mm_config '{"mm_type": "W-fp8-channel-sym-A-fp8-channel-sym-dynamic-Vllm", "weight_auto_quant": true}'
# The last real argument above carries no trailing backslash: the original
# continued the command into the comment lines below, which only worked by
# accident. Alternatives to swap in if needed:
# --mm_config '{"mm_type": "Default", "weight_auto_quant": true}'
# --use_ret_steps