Commit 3b460075 authored by helloyongyang's avatar helloyongyang Committed by Yang Yong(雍洋)
Browse files

update

parent f21528e7
#!/bin/bash
# Single-GPU HunyuanVideo text-to-video inference via the lightx2v entry point.
# NOTE(review): reconstructed from a side-by-side diff rendering in which old
# and new columns were fused onto single lines; the lines after --prompt were
# collapsed ("......") — restore any remaining flags from the full script
# before relying on this.

lightx2v_path=/mtc/yongyang/projects/lightx2v
export PYTHONPATH=${lightx2v_path}:$PYTHONPATH

# Pin the run to a single device.
export CUDA_VISIBLE_DEVICES=2

model_path=/mtc/yongyang/models/x2v_models/hunyuan/lightx2v_format/t2v

python "${lightx2v_path}/lightx2v/__main__.py" \
    --model_cls hunyuan \
    --model_path "$model_path" \
    --prompt "A cat walks on the grass, realistic style."
#!/bin/bash
# Multi-GPU (4-way) HunyuanVideo text-to-video inference via torchrun and the
# lightx2v entry point.
# NOTE(review): reconstructed from a side-by-side diff rendering; the lines
# after --prompt were collapsed ("......") — restore any remaining flags from
# the full script before relying on this.

lightx2v_path=/mtc/yongyang/projects/lightx2v
export PYTHONPATH=${lightx2v_path}:$PYTHONPATH

# Four devices, one rank per device (matches --nproc_per_node=4 below).
export CUDA_VISIBLE_DEVICES=0,1,2,3

model_path=/mtc/yongyang/models/x2v_models/hunyuan/lightx2v_format/t2v

torchrun --nproc_per_node=4 "${lightx2v_path}/lightx2v/__main__.py" \
    --model_cls hunyuan \
    --model_path "$model_path" \
    --prompt "A cat walks on the grass, realistic style."
#!/bin/bash
# Single-GPU HunyuanVideo text-to-video inference with a long descriptive
# prompt, via the lightx2v entry point.
# NOTE(review): reconstructed from a side-by-side diff rendering; the lines
# after --prompt were collapsed ("......") — restore any remaining flags from
# the full script before relying on this.

lightx2v_path=/mtc/yongyang/projects/lightx2v
export PYTHONPATH=${lightx2v_path}:$PYTHONPATH

# Pin the run to a single device.
export CUDA_VISIBLE_DEVICES=2

model_path=/mtc/yongyang/models/x2v_models/hunyuan/lightx2v_format/t2v

python "${lightx2v_path}/lightx2v/__main__.py" \
    --model_cls hunyuan \
    --model_path "$model_path" \
    --prompt "A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea. The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse. Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood, with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting."
#!/bin/bash
# Single-GPU Wan2.1 image-to-video inference with dynamic FP8 quantization
# (vLLM kernels), via the lightx2v entry point.
# NOTE(review): reconstructed from a side-by-side diff rendering; a hunk
# between --model_path and --save_video_path was collapsed
# ("@@ -22,7 +25,7 @@"). config_path is assigned but its consuming flag
# (presumably --config_path) sits in the collapsed region — restore the
# missing lines from the full script before relying on this.

lightx2v_path=/mtc/yongyang/projects/lightx2v
export PYTHONPATH=${lightx2v_path}:$PYTHONPATH

# Pin the run to a single device.
export CUDA_VISIBLE_DEVICES=2

model_path=/mtc/yongyang/models/x2v_models/wan/Wan2.1-I2V-14B-480P
# shellcheck disable=SC2034 — consumed in the collapsed diff hunk (see NOTE).
config_path=/mtc/yongyang/models/x2v_models/wan/Wan2.1-I2V-14B-480P/config.json

python "${lightx2v_path}/lightx2v/__main__.py" \
    --model_cls wan2.1 \
    --task i2v \
    --model_path "$model_path" \
    --save_video_path ./output_lightx2v_seed42_fp8_base.mp4 \
    --sample_guide_scale 5 \
    --sample_shift 5 \
    --image_path "${lightx2v_path}/assets/inputs/imgs/img_0.jpg" \
    --mm_config '{"mm_type": "W-fp8-channel-sym-A-fp8-channel-sym-dynamic-Vllm", "weight_auto_quant": true}'
    # Optional flags kept disabled in the original:
    # --feature_caching Tea \
    # --use_ret_steps \
#!/bin/bash
# Single-GPU Wan2.1 text-to-video inference via the lightx2v entry point.
# NOTE(review): reconstructed from a side-by-side diff rendering; the lines
# after --model_path were collapsed ("......") — restore any remaining flags
# from the full script before relying on this. config_path is assigned here
# but its consuming flag is in the collapsed region.

lightx2v_path=/mtc/yongyang/projects/lightx2v
export PYTHONPATH=${lightx2v_path}:$PYTHONPATH

# Pin the run to a single device.
export CUDA_VISIBLE_DEVICES=2

model_path=/mtc/yongyang/models/x2v_models/wan/Wan2.1-T2V-1.3B
# shellcheck disable=SC2034 — consumed in the collapsed diff region (see NOTE).
config_path=/mtc/yongyang/models/x2v_models/wan/Wan2.1-T2V-1.3B/config.json

python "${lightx2v_path}/lightx2v/__main__.py" \
    --model_cls wan2.1 \
    --task t2v \
    --model_path "$model_path"
#!/bin/bash
# Multi-GPU (4-way) Wan2.1 text-to-video inference via torchrun and the
# lightx2v entry point.
# NOTE(review): reconstructed from a side-by-side diff rendering; the lines
# after --model_path were collapsed ("......") — restore any remaining flags
# from the full script before relying on this. config_path is assigned here
# but its consuming flag is in the collapsed region.

lightx2v_path=/mtc/yongyang/projects/lightx2v
export PYTHONPATH=${lightx2v_path}:$PYTHONPATH

# Four devices, one rank per device (matches --nproc_per_node=4 below).
export CUDA_VISIBLE_DEVICES=0,1,2,3

model_path=/mtc/yongyang/models/x2v_models/wan/Wan2.1-T2V-1.3B
# shellcheck disable=SC2034 — consumed in the collapsed diff region (see NOTE).
config_path=/mtc/yongyang/models/x2v_models/wan/Wan2.1-T2V-1.3B/config.json

torchrun --nproc_per_node=4 "${lightx2v_path}/lightx2v/__main__.py" \
    --model_cls wan2.1 \
    --task t2v \
    --model_path "$model_path"
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment