# causvid_model.py
import os

import torch

from lightx2v.common.ops.attn.radial_attn import MaskMap
from lightx2v.models.networks.wan.infer.causvid.transformer_infer import (
    WanTransformerInferCausVid,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.model import WanModel
from lightx2v.models.networks.wan.weights.post_weights import WanPostWeights
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.envs import *  # provides GET_DTYPE() / GET_SENSITIVE_DTYPE()
from lightx2v.utils.utils import find_torch_model_path


class WanCausVidModel(WanModel):
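    """Wan video model variant for CausVid autoregressive inference.

    Runs the transformer causally over a KV-cache window (``kv_start`` to
    ``kv_end``) and supports radial attention via a lazily built ``MaskMap``.
    """
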
    pre_weight_class = WanPreWeights
    post_weight_class = WanPostWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        super().__init__(model_path, config, device)

    def _init_infer_class(self):
        # Standard Wan pre/post inference; only the transformer stage is
        # swapped for the CausVid variant, which consumes KV-window bounds.
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer
        self.transformer_infer_class = WanTransformerInferCausVid

    def _load_ckpt(self, unified_dtype, sensitive_layer):
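        """Load ``causvid_model.pt`` if present, applying the dtype policy.

        Falls back to the base-class loader when no CausVid checkpoint exists.
        """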
        # Prefer a dedicated CausVid checkpoint when the model ships one.
        ckpt_path = find_torch_model_path(self.config, self.model_path, "causvid_model.pt")
        if os.path.exists(ckpt_path):
            weight_dict = torch.load(ckpt_path, map_location="cpu", weights_only=True)
            weight_dict = {
                # Cast to the unified dtype unless the key names a sensitive
                # layer, which keeps the sensitive dtype; pin host memory to
                # speed up the copy to the target device.
                key: (
                    weight_dict[key].to(GET_DTYPE())
                    if unified_dtype or all(s not in key for s in sensitive_layer)
                    else weight_dict[key].to(GET_SENSITIVE_DTYPE())
                )
                .pin_memory()
                .to(self.device)
                for key in weight_dict
            }
            return weight_dict

        # No CausVid-specific checkpoint found; fall back to the base loader.
        return super()._load_ckpt(unified_dtype, sensitive_layer)

    @torch.no_grad()
    def infer(self, inputs, kv_start, kv_end):
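        """Run one denoising step over the KV window given by ``kv_start``/``kv_end``.

        Writes the result to ``self.scheduler.noise_pred`` rather than returning it.
        """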
        # Lazily build the radial-attention mask map on the first call.
        # The second latent dim is treated as the frame count here; each frame
        # contributes (h // 2) * (w // 2) tokens after 2x2 spatial patchify.
        if self.transformer_infer.mask_map is None:
            _, c, h, w = self.scheduler.latents.shape
            video_token_num = c * (h // 2) * (w // 2)
            self.transformer_infer.mask_map = MaskMap(video_token_num, c)

        # With CPU offload enabled, stage the pre/post weights on the GPU
        # only for the duration of this step.
        if self.config["cpu_offload"]:
            self.pre_weight.to_cuda()
            self.post_weight.to_cuda()

        # Embed the inputs for the current causal KV window.
        embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, kv_start=kv_start, kv_end=kv_end)

        # Run the causal transformer over the window, then project the output
        # back to a noise prediction for the scheduler.
        x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out, kv_start, kv_end)
        self.scheduler.noise_pred = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        # Return the offloaded weights to host memory after the pass.
        if self.config["cpu_offload"]:
            self.pre_weight.to_cpu()
            self.post_weight.to_cpu()
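
# Usage sketch (hypothetical driver; ``kv_block_schedule``, ``inputs``, and the
# surrounding scheduler wiring are assumptions, not part of this file):
#
#   model = WanCausVidModel(model_path, config, device=torch.device("cuda"))
#   for kv_start, kv_end in kv_block_schedule:  # causal attention windows
#       model.infer(inputs, kv_start, kv_end)
#       noise_pred = model.scheduler.noise_pred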