import glob
import json
import os

import torch
import torch.distributed as dist
from loguru import logger
from safetensors import safe_open

from lightx2v.common.ops.attn import MaskMap
from lightx2v.models.networks.wan.infer.dist_infer.transformer_infer import WanTransformerDistInfer
from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
    WanTransformerInferDualBlock,
    WanTransformerInferDynamicBlock,
    WanTransformerInferFirstBlock,
    WanTransformerInferTaylorCaching,
    WanTransformerInferTeaCaching,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.weights.post_weights import WanPostWeights
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.envs import *
from lightx2v.utils.utils import *

try:
    import gguf
except ImportError:
    gguf = None


class WanModel:
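    """Wan DiT model wrapper.

    Resolves and loads checkpoints (original, quantized, or lazily split),
    selects the pre/transformer/post inference classes from the config, and
    runs denoising steps with optional CPU offload, feature caching, and
    CFG parallelism.
    """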
    pre_weight_class = WanPreWeights
    post_weight_class = WanPostWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        self.model_path = model_path
        self.config = config
        self.cpu_offload = self.config.get("cpu_offload", False)
        self.offload_granularity = self.config.get("offload_granularity", "block")

        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)
        self.dit_quantized = self.config.mm_config.get("mm_type", "Default") != "Default"

        if self.dit_quantized:
            dit_quant_scheme = self.config.mm_config.get("mm_type").split("-")[1]
            if self.config.model_cls == "wan2.1_distill":
                dit_quant_scheme = "distill_" + dit_quant_scheme
            if dit_quant_scheme == "gguf":
                self.dit_quantized_ckpt = find_gguf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
                self.config.use_gguf = True
            else:
                self.dit_quantized_ckpt = find_hf_model_path(config, self.model_path, "dit_quantized_ckpt", subdir=dit_quant_scheme)
            quant_config_path = os.path.join(self.dit_quantized_ckpt, "config.json")
            if os.path.exists(quant_config_path):
                with open(quant_config_path, "r") as f:
                    quant_model_config = json.load(f)
                self.config.update(quant_model_config)
        else:
            self.dit_quantized_ckpt = None
            assert not self.config.get("lazy_load", False)

        self.config.dit_quantized_ckpt = self.dit_quantized_ckpt

        self.weight_auto_quant = self.config.mm_config.get("weight_auto_quant", False)
        if self.dit_quantized:
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()

    def _init_infer_class(self):
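        """Select the transformer infer class: the distributed variant under
        sequence parallelism, otherwise one chosen by the feature_caching mode."""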
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer
        if self.config["seq_parallel"]:
            self.transformer_infer_class = WanTransformerDistInfer
        else:
            if self.config["feature_caching"] == "NoCaching":
                self.transformer_infer_class = WanTransformerInfer
            elif self.config["feature_caching"] == "Tea":
                self.transformer_infer_class = WanTransformerInferTeaCaching
            elif self.config["feature_caching"] == "TaylorSeer":
                self.transformer_infer_class = WanTransformerInferTaylorCaching
            elif self.config["feature_caching"] == "Ada":
                self.transformer_infer_class = WanTransformerInferAdaCaching
            elif self.config["feature_caching"] == "Custom":
                self.transformer_infer_class = WanTransformerInferCustomCaching
            elif self.config["feature_caching"] == "FirstBlock":
                self.transformer_infer_class = WanTransformerInferFirstBlock
            elif self.config["feature_caching"] == "DualBlock":
                self.transformer_infer_class = WanTransformerInferDualBlock
            elif self.config["feature_caching"] == "DynamicBlock":
                self.transformer_infer_class = WanTransformerInferDynamicBlock
            else:
                raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _load_safetensor_to_dict(self, file_path, unified_dtype, sensitive_layer):
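        """Load one safetensors file, casting tensors to GET_DTYPE() except for
        sensitive layers, which are cast to GET_SENSITIVE_DTYPE()."""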
        with safe_open(file_path, framework="pt") as f:
            return {
                key: (
                    f.get_tensor(key).to(GET_DTYPE())
                    if unified_dtype or all(s not in key for s in sensitive_layer)
                    else f.get_tensor(key).to(GET_SENSITIVE_DTYPE())
                )
                .pin_memory()
                .to(self.device)
                for key in f.keys()
            }

    def _load_ckpt(self, unified_dtype, sensitive_layer):
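        """Load the original (non-quantized) checkpoint from every *.safetensors
        file under the resolved model path."""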
        safetensors_path = find_hf_model_path(self.config, self.model_path, "dit_original_ckpt", subdir="original")
        safetensors_files = glob.glob(os.path.join(safetensors_path, "*.safetensors"))
        weight_dict = {}
        for file_path in safetensors_files:
            file_weights = self._load_safetensor_to_dict(file_path, unified_dtype, sensitive_layer)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, unified_dtype, sensitive_layer):
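        """Load a quantized checkpoint through its *.index.json weight map.

        Floating-point tensors follow the sensitive-layer dtype rules; quantized
        (non-float) tensors are moved to the device unchanged."""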
        ckpt_path = self.dit_quantized_ckpt
        logger.info(f"Loading quant dit model from {ckpt_path}")

        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f"Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    if f.get_tensor(k).dtype in [torch.float16, torch.bfloat16, torch.float]:
                        if unified_dtype or all(s not in k for s in sensitive_layer):
                            weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_DTYPE()).to(self.device)
                        else:
                            weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_SENSITIVE_DTYPE()).to(self.device)
                    else:
                        weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, unified_dtype, sensitive_layer):
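        """Lazy-load path: load only non_block.safetensors (pre/post weights);
        transformer block weights are left to be loaded lazily elsewhere."""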
        lazy_load_model_path = self.dit_quantized_ckpt
        logger.info(f"Loading split quant model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                if f.get_tensor(k).dtype in [torch.float16, torch.bfloat16, torch.float]:
                    if unified_dtype or all(s not in k for s in sensitive_layer):
                        pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_DTYPE()).to(self.device)
                    else:
                        pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_SENSITIVE_DTYPE()).to(self.device)
                else:
                    pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)

        return pre_post_weight_dict

    def _load_gguf_ckpt(self):
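        """Stub: GGUF checkpoint loading is not implemented yet (see TODO below)."""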
        # gguf is an optional dependency (see the guarded import at the top).
        if gguf is None:
            raise ImportError("The 'gguf' package is required to load GGUF-quantized checkpoints")
        gguf_path = self.dit_quantized_ckpt
        logger.info(f"Loading gguf-quant dit model from {gguf_path}")
        reader = gguf.GGUFReader(gguf_path)
        for tensor in reader.tensors:
            # TODO: implement _load_gguf_ckpt
            pass

    def _init_weights(self, weight_dict=None):
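        """Resolve the weight dict (original, quantized, split, or passed in)
        and load it into the pre/post/transformer weight containers."""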
        unified_dtype = GET_DTYPE() == GET_SENSITIVE_DTYPE()
        # Some layers run in float32 to preserve accuracy
        sensitive_layer = {
            "norm",
            "embedding",
            "modulation",
            "time",
            "img_emb.proj.0",
            "img_emb.proj.4",
        }
        if weight_dict is None:
            if not self.dit_quantized or self.weight_auto_quant:
                self.original_weight_dict = self._load_ckpt(unified_dtype, sensitive_layer)
            else:
                if not self.config.get("lazy_load", False):
                    self.original_weight_dict = self._load_quant_ckpt(unified_dtype, sensitive_layer)
                else:
                    self.original_weight_dict = self._load_quant_split_ckpt(unified_dtype, sensitive_layer)
        else:
            self.original_weight_dict = weight_dict
        # init weights
        self.pre_weight = self.pre_weight_class(self.config)
        self.post_weight = self.post_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)
        # load weights
        self.pre_weight.load(self.original_weight_dict)
        self.post_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

    def _init_infer(self):
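        """Instantiate the infer objects and pick the CFG-parallel or default
        inference entry point."""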
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)
        if self.config["cfg_parallel"]:
            self.infer_func = self.infer_with_cfg_parallel
        else:
            self.infer_func = self.infer_wo_cfg_parallel

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.post_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.post_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
        return self.infer_func(inputs)

    @torch.no_grad()
    def infer_wo_cfg_parallel(self, inputs):
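        """Run one denoising step on this rank: a conditional pass, then, if CFG
        is enabled, an unconditional pass combined via the guidance scale."""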
        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == 0:
                self.to_cuda()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cuda()
                self.post_weight.to_cuda()

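        # Build the attention mask map once from the latent shape: one token per
        # 2x2 spatial patch, i.e. c * (h // 2) * (w // 2) tokens in total.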
        if self.transformer_infer.mask_map is None:
            _, c, h, w = self.scheduler.latents.shape
            video_token_num = c * (h // 2) * (w // 2)
            self.transformer_infer.mask_map = MaskMap(video_token_num, c)

        embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=True)
        x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
        noise_pred_cond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        self.scheduler.noise_pred = noise_pred_cond

        if self.clean_cuda_cache:
            del x, embed, pre_infer_out, noise_pred_cond, grid_sizes
            torch.cuda.empty_cache()

        if self.config["enable_cfg"]:
            embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=False)
            x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
            noise_pred_uncond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (self.scheduler.noise_pred - noise_pred_uncond)

            if self.clean_cuda_cache:
                del x, embed, pre_infer_out, noise_pred_uncond, grid_sizes
                torch.cuda.empty_cache()

        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == self.scheduler.infer_steps - 1:
                self.to_cpu()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cpu()
                self.post_weight.to_cpu()

    @torch.no_grad()
    def infer_with_cfg_parallel(self, inputs):
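        """Split the CFG pair across two ranks: rank 0 runs the conditional pass,
        rank 1 the unconditional; predictions are all-gathered and combined with
        the guidance scale."""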
        assert self.config["enable_cfg"], "enable_cfg must be True"
        cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
        assert dist.get_world_size(cfg_p_group) == 2, "cfg_p world size must be 2"
        cfg_p_rank = dist.get_rank(cfg_p_group)

        positive = cfg_p_rank == 0  # rank 0 -> conditional pass, rank 1 -> unconditional
        embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=positive)
        x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
        noise_pred = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        noise_pred_list = [torch.zeros_like(noise_pred) for _ in range(2)]
        dist.all_gather(noise_pred_list, noise_pred, group=cfg_p_group)

        noise_pred_cond = noise_pred_list[0]  # cfg_p_rank == 0
        noise_pred_uncond = noise_pred_list[1]  # cfg_p_rank == 1
        self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (noise_pred_cond - noise_pred_uncond)