import os
import sys
import glob
import json

import torch
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.post_weights import WanPostWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferTeaCaching,
    WanTransformerInferTaylorCaching,
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
)
from safetensors import safe_open
import lightx2v.attentions.distributed.ulysses.wrap as ulysses_dist_wrap
import lightx2v.attentions.distributed.ring.wrap as ring_dist_wrap

from lightx2v.utils.envs import *
from loguru import logger


class WanModel:
    pre_weight_class = WanPreWeights
    post_weight_class = WanPostWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        self.model_path = model_path
        self.config = config
        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)

        self.dit_quantized = self.config.mm_config.get("mm_type", "Default") != "Default"
        self.dit_quantized_ckpt = self.config.get("dit_quantized_ckpt", None)
        self.weight_auto_quant = self.config.mm_config.get("weight_auto_quant", False)
        if self.dit_quantized:
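            # A quantized DiT needs either on-the-fly auto-quantization or a
            # pre-quantized checkpoint to load from.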
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()
        self.current_lora = None

        if config["parallel_attn_type"]:
            if config["parallel_attn_type"] == "ulysses":
                ulysses_dist_wrap.parallelize_wan(self)
            elif config["parallel_attn_type"] == "ring":
                ring_dist_wrap.parallelize_wan(self)
            else:
                raise ValueError(f"Unsupported parallel_attn_type: {config['parallel_attn_type']}")

    def _init_infer_class(self):
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer
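        # Select the transformer inference implementation from the configured
        # feature-caching strategy.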
        if self.config["feature_caching"] == "NoCaching":
            self.transformer_infer_class = WanTransformerInfer
        elif self.config["feature_caching"] == "Tea":
            self.transformer_infer_class = WanTransformerInferTeaCaching
        elif self.config["feature_caching"] == "TaylorSeer":
            self.transformer_infer_class = WanTransformerInferTaylorCaching
        elif self.config["feature_caching"] == "Ada":
            self.transformer_infer_class = WanTransformerInferAdaCaching
        elif self.config["feature_caching"] == "Custom":
            self.transformer_infer_class = WanTransformerInferCustomCaching
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _load_safetensor_to_dict(self, file_path, use_bf16, skip_bf16):
        with safe_open(file_path, framework="pt") as f:
            weights = {}
            for key in f.keys():
                tensor = f.get_tensor(key)
                # Precision-sensitive keys (skip_bf16) keep their original
                # dtype unless bf16 is forced globally.
                if use_bf16 or all(s not in key for s in skip_bf16):
                    tensor = tensor.to(torch.bfloat16)
                # Pin host memory to speed up the copy to the target device.
                weights[key] = tensor.pin_memory().to(self.device)
            return weights

    def _load_ckpt(self, use_bf16, skip_bf16):
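        # Collect every *.safetensors shard under model_path and merge them
        # into a single flat state dict.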
        safetensors_pattern = os.path.join(self.model_path, "*.safetensors")
        safetensors_files = glob.glob(safetensors_pattern)

        if not safetensors_files:
            raise FileNotFoundError(f"No .safetensors files found in directory: {self.model_path}")

        weight_dict = {}
        for file_path in safetensors_files:
            file_weights = self._load_safetensor_to_dict(file_path, use_bf16, skip_bf16)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, use_bf16, skip_bf16):
        ckpt_path = self.config.dit_quantized_ckpt
        logger.info(f"Loading quant dit model from {ckpt_path}")

        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f"Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    tensor = f.get_tensor(k)
                    # Only float32 tensors are candidates for the bf16 cast;
                    # quantized tensors keep their stored dtype.
                    if tensor.dtype == torch.float and (use_bf16 or all(s not in k for s in skip_bf16)):
                        tensor = tensor.to(torch.bfloat16)
                    weight_dict[k] = tensor.pin_memory().to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, use_bf16, skip_bf16):
        lazy_load_model_path = self.config.dit_quantized_ckpt
        logger.info(f"Loading split quant model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
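        # non_block.safetensors holds only the pre/post (non-transformer-block)
        # weights; the block weights are left on disk for the lazy-load path.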
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                tensor = f.get_tensor(k)
                # Same casting rule as the non-split loader: only float32
                # tensors may be cast; precision-sensitive keys keep float32.
                if tensor.dtype == torch.float and (use_bf16 or all(s not in k for s in skip_bf16)):
                    tensor = tensor.to(torch.bfloat16)
                pre_post_weight_dict[k] = tensor.pin_memory().to(self.device)

        return pre_post_weight_dict

    def _init_weights(self, weight_dict=None):
        use_bf16 = GET_DTYPE() == "BF16"
        # Keys matching these patterns keep float32 for accuracy; they are
        # cast to bf16 only when bf16 is forced globally.
        skip_bf16 = {"norm", "embedding", "modulation", "time", "img_emb.proj.0", "img_emb.proj.4"}
        if weight_dict is None:
            if not self.dit_quantized or self.weight_auto_quant:
                self.original_weight_dict = self._load_ckpt(use_bf16, skip_bf16)
            else:
                if not self.config.get("lazy_load", False):
                    self.original_weight_dict = self._load_quant_ckpt(use_bf16, skip_bf16)
                else:
                    self.original_weight_dict = self._load_quant_split_ckpt(use_bf16, skip_bf16)
        else:
            self.original_weight_dict = weight_dict

        # init weights
        self.pre_weight = self.pre_weight_class(self.config)
        self.post_weight = self.post_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)
        # load weights
        self.pre_weight.load(self.original_weight_dict)
        self.post_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

    def _init_infer(self):
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.post_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.post_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
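        # One denoising step: pre_infer builds embeddings, transformer_infer
        # runs the DiT blocks, post_infer maps them back to a noise prediction,
        # and an optional second pass computes the unconditional branch for CFG.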
        if self.config["cpu_offload"]:
            self.pre_weight.to_cuda()
            self.post_weight.to_cuda()

        embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=True)
        x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
        noise_pred_cond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        self.scheduler.noise_pred = noise_pred_cond

        if self.clean_cuda_cache:
            del x, embed, pre_infer_out, noise_pred_cond, grid_sizes
            torch.cuda.empty_cache()

        if self.config["enable_cfg"]:
            embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=False)
            x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
            noise_pred_uncond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

            # Classifier-free guidance: combine as uncond + s * (cond - uncond),
            # where the conditional prediction is already in scheduler.noise_pred.
            self.scheduler.noise_pred = noise_pred_uncond + self.config.sample_guide_scale * (self.scheduler.noise_pred - noise_pred_uncond)

            if self.clean_cuda_cache:
                del x, embed, pre_infer_out, noise_pred_uncond, grid_sizes
                torch.cuda.empty_cache()

        # Offload the pre/post weights back to CPU after the step, mirroring
        # the to_cuda() moves at the top of infer().
        if self.config["cpu_offload"]:
            self.pre_weight.to_cpu()
            self.post_weight.to_cpu()
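
# Minimal usage sketch (illustrative, not part of the module): `config` must be
# a lightx2v config object supporting both dict-style and attribute access for
# the keys used above (mm_config, feature_caching, parallel_attn_type,
# cpu_offload, enable_cfg, sample_guide_scale, ...); `scheduler` and `inputs`
# come from the surrounding pipeline.
#
#   model = WanModel("/path/to/wan/checkpoint", config, torch.device("cuda"))
#   model.set_scheduler(scheduler)
#   model.infer(inputs)  # one denoising step per call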