Unverified commit 8da2797d authored by sandy, committed by GitHub

[Feat] Add load lora for animate (#433)

parent e6868185
{
    "infer_steps": 4,
    "target_video_length": 77,
    "text_len": 512,
    "target_height": 720,
    "target_width": 1280,
    "self_attn_1_type": "flash_attn3",
    "cross_attn_1_type": "flash_attn3",
    "cross_attn_2_type": "flash_attn3",
    "adapter_attn_type": "flash_attn3",
    "sample_shift": 5.0,
    "sample_guide_scale": 1.0,
    "enable_cfg": false,
    "cpu_offload": false,
    "src_pose_path": "../save_results/animate/process_results/src_pose.mp4",
    "src_face_path": "../save_results/animate/process_results/src_face.mp4",
    "src_ref_images": "../save_results/animate/process_results/src_ref.png",
    "refert_num": 1,
    "replace_flag": false,
    "fps": 30,
    "lora_configs": [
        {
            "path": "lightx2v_I2V_14B_480p_cfg_step_distill_rank32_bf16.safetensors",
            "strength": 1.0
        }
    ]
}
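This is presumably the new `configs/wan22/wan_animate_lora.json` referenced by the run script below. The new `lora_configs` key is a list of `{path, strength}` objects, and the `src_*` paths must point at the outputs of the preprocessing step. A minimal pre-flight check can catch missing files before a run; this is a sketch, and `check_animate_config` is a hypothetical helper, not part of lightx2v:

```python
import json
from pathlib import Path

def check_animate_config(config_path):
    # Hypothetical pre-flight helper: verify that every file the animate
    # config references exists before inference starts.
    config = json.loads(Path(config_path).read_text())
    # Preprocessing outputs (paths are relative to where inference runs)
    for key in ("src_pose_path", "src_face_path", "src_ref_images"):
        assert Path(config[key]).exists(), f"missing preprocessing output: {config[key]}"
    # LoRA entries: "path" is required, "strength" defaults to 1.0 in the runner
    for lora in config.get("lora_configs", []):
        assert Path(lora["path"]).exists(), f"missing LoRA weights: {lora['path']}"
        assert lora.get("strength", 1.0) >= 0.0, "strength should be non-negative"

# Example call (path as used by the run script below)
check_animate_config("configs/wan22/wan_animate_lora.json")
```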
@@ -17,6 +17,7 @@ except ImportError:
 from lightx2v.models.input_encoders.hf.animate.face_encoder import FaceEncoder
 from lightx2v.models.input_encoders.hf.animate.motion_encoder import Generator
 from lightx2v.models.networks.wan.animate_model import WanAnimateModel
+from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
 from lightx2v.models.runners.wan.wan_runner import WanRunner
 from lightx2v.server.metrics import monitor_cli
 from lightx2v.utils.envs import *
@@ -391,6 +392,17 @@ class WanAnimateRunner(WanRunner):
             self.config,
             self.init_device,
         )
+        if self.config.get("lora_configs") and self.config.lora_configs:
+            assert not self.config.get("dit_quantized", False)
+            lora_wrapper = WanLoraWrapper(model)
+            for lora_config in self.config.lora_configs:
+                lora_path = lora_config["path"]
+                strength = lora_config.get("strength", 1.0)
+                lora_name = lora_wrapper.load_lora(lora_path)
+                lora_wrapper.apply_lora(lora_name, strength)
+                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")
         motion_encoder, face_encoder = self.load_encoders()
         model.set_animate_encoders(motion_encoder, face_encoder)
         return model
...
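The diff only shows the call sites; the internals of `WanLoraWrapper.load_lora` and `apply_lora` are not part of this commit. The runner also asserts that the DiT is not quantized before loading, presumably because fusing float low-rank deltas into quantized weights is unsupported. Conceptually, applying a LoRA at a given strength merges the low-rank delta into each matching base weight. The following is a sketch of that standard merge, an assumption about the technique rather than the actual lightx2v implementation:

```python
import torch

def apply_lora_delta(base_weight, lora_A, lora_B, strength=1.0, alpha=None):
    # Standard LoRA merge: W' = W + strength * (alpha / rank) * (B @ A).
    # The alpha/rank scaling is an assumption; some checkpoints bake it
    # into the saved A/B matrices instead.
    rank = lora_A.shape[0]
    scale = (alpha / rank) if alpha is not None else 1.0
    delta = (lora_B.to(torch.float32) @ lora_A.to(torch.float32)) * scale
    return (base_weight.to(torch.float32) + strength * delta).to(base_weight.dtype)

# Example: a rank-32 adapter on a square projection (shapes illustrative,
# matching the rank32_bf16 checkpoint named in the config above)
W = torch.randn(5120, 5120, dtype=torch.bfloat16)
A = torch.randn(32, 5120)   # lora_A / "down" matrix
B = torch.randn(5120, 32)   # lora_B / "up" matrix
W_merged = apply_lora_delta(W, A, B, strength=1.0)
```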
#!/bin/bash
# set these paths first
lightx2v_path=
model_path=
video_path=
refer_path=
export CUDA_VISIBLE_DEVICES=0
# set environment variables
source ${lightx2v_path}/scripts/base/base.sh
# preprocess the driving video and the reference image
python ${lightx2v_path}/tools/preprocess/preprocess_data.py \
--ckpt_path ${model_path}/process_checkpoint \
--video_path $video_path \
--refer_path $refer_path \
--save_path ${lightx2v_path}/save_results/animate/process_results \
--resolution_area 1280 720 \
--retarget_flag

# run inference (the prompt means "the person in the video is performing actions";
# the negative prompt is the standard Chinese Wan negative prompt listing artifacts to avoid)
python -m lightx2v.infer \
--model_cls wan2.2_animate \
--task animate \
--model_path $model_path \
--config_json ${lightx2v_path}/configs/wan22/wan_animate_lora.json \
--prompt "视频中的人在做动作" \
--negative_prompt "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" \
--save_result_path ${lightx2v_path}/save_results/output_lightx2v_wan22_animate_lora.mp4
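To sweep the LoRA `strength` without hand-editing the config, variant configs can be generated programmatically and passed via `--config_json`. A sketch; the output file names are illustrative:

```python
import json
from pathlib import Path

base = json.loads(Path("configs/wan22/wan_animate_lora.json").read_text())
for strength in (0.5, 0.75, 1.0):
    cfg = dict(base)
    # Override the strength of every configured LoRA, keeping paths intact
    cfg["lora_configs"] = [{**lora, "strength": strength} for lora in base["lora_configs"]]
    out = Path(f"configs/wan22/wan_animate_lora_s{strength}.json")
    out.write_text(json.dumps(cfg, indent=4))
    print(f"wrote {out}")  # pass this path to lightx2v.infer via --config_json
```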