import glob
import json
import os

import torch
import torch.distributed as dist
from loguru import logger
from safetensors import safe_open

from lightx2v.common.ops.attn import MaskMap
from lightx2v.models.networks.wan.infer.dist_infer.transformer_infer import WanTransformerDistInfer
from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
    WanTransformerInferDualBlock,
    WanTransformerInferDynamicBlock,
    WanTransformerInferFirstBlock,
    WanTransformerInferTaylorCaching,
    WanTransformerInferTeaCaching,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.weights.post_weights import WanPostWeights
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.envs import *
from lightx2v.utils.utils import *

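# gguf is an optional dependency; it is only needed when loading GGUF-quantized checkpoints.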
try:
    import gguf
except ImportError:
    gguf = None


class WanModel:
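    """Wan DiT model wrapper.

    Loads the pre/post/transformer weights (optionally quantized, GGUF, or lazily
    loaded), selects the transformer inference implementation from the config
    (sequence parallelism, feature caching), and runs denoising inference with or
    without CFG parallelism.
    """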
    pre_weight_class = WanPreWeights
    post_weight_class = WanPostWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        self.model_path = model_path
        self.config = config
        self.cpu_offload = self.config.get("cpu_offload", False)
        self.offload_granularity = self.config.get("offload_granularity", "block")

        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)
        self.dit_quantized = self.config.mm_config.get("mm_type", "Default") != "Default"

        if self.dit_quantized:
            dit_quant_scheme = self.config.mm_config.get("mm_type").split("-")[1]
            if dit_quant_scheme == "gguf":
                self.dit_quantized_ckpt = find_gguf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
                self.config.use_gguf = True
            else:
                self.dit_quantized_ckpt = find_hf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
            quant_config_path = os.path.join(self.dit_quantized_ckpt, "config.json")
            if os.path.exists(quant_config_path):
                with open(quant_config_path, "r") as f:
                    quant_model_config = json.load(f)
                self.config.update(quant_model_config)
        else:
            self.dit_quantized_ckpt = None
            assert not self.config.get("lazy_load", False)

        self.config.dit_quantized_ckpt = self.dit_quantized_ckpt

        self.weight_auto_quant = self.config.mm_config.get("weight_auto_quant", False)
        if self.dit_quantized:
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()

    def _init_infer_class(self):
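        # Select the inference implementations; the transformer variant depends on
        # sequence parallelism and the configured feature-caching strategy.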
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer
        if self.config["seq_parallel"]:
            self.transformer_infer_class = WanTransformerDistInfer
        else:
            if self.config["feature_caching"] == "NoCaching":
                self.transformer_infer_class = WanTransformerInfer
            elif self.config["feature_caching"] == "Tea":
                self.transformer_infer_class = WanTransformerInferTeaCaching
            elif self.config["feature_caching"] == "TaylorSeer":
                self.transformer_infer_class = WanTransformerInferTaylorCaching
            elif self.config["feature_caching"] == "Ada":
                self.transformer_infer_class = WanTransformerInferAdaCaching
            elif self.config["feature_caching"] == "Custom":
                self.transformer_infer_class = WanTransformerInferCustomCaching
            elif self.config["feature_caching"] == "FirstBlock":
                self.transformer_infer_class = WanTransformerInferFirstBlock
            elif self.config["feature_caching"] == "DualBlock":
                self.transformer_infer_class = WanTransformerInferDualBlock
            elif self.config["feature_caching"] == "DynamicBlock":
                self.transformer_infer_class = WanTransformerInferDynamicBlock
            else:
                raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _load_safetensor_to_dict(self, file_path, use_bf16, skip_bf16):
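        # Load every tensor from one safetensors shard, casting to bfloat16 unless the
        # key matches a skip_bf16 pattern (when the global dtype is not BF16), then pin
        # the memory and move it to the target device.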
        with safe_open(file_path, framework="pt") as f:
            return {
                key: (
                    f.get_tensor(key).to(torch.bfloat16)
                    if use_bf16 or all(s not in key for s in skip_bf16)
                    else f.get_tensor(key)
                )
                .pin_memory()
                .to(self.device)
                for key in f.keys()
            }

    def _load_ckpt(self, use_bf16, skip_bf16):
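        # Load the original (non-quantized) DiT weights from all safetensors shards
        # under the "original" checkpoint directory.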
        safetensors_path = find_hf_model_path(self.config, "dit_original_ckpt", subdir="original")
        safetensors_files = glob.glob(os.path.join(safetensors_path, "*.safetensors"))
        weight_dict = {}
        for file_path in safetensors_files:
            file_weights = self._load_safetensor_to_dict(file_path, use_bf16, skip_bf16)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, use_bf16, skip_bf16):
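        # Load a quantized checkpoint through its *.index.json weight map; float32
        # tensors may be cast to bfloat16 (subject to skip_bf16), other dtypes are kept.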
        ckpt_path = self.dit_quantized_ckpt
        logger.info(f"Loading quant dit model from {ckpt_path}")

        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f" Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    if f.get_tensor(k).dtype == torch.float:
                        if use_bf16 or all(s not in k for s in skip_bf16):
                            weight_dict[k] = f.get_tensor(k).pin_memory().to(torch.bfloat16).to(self.device)
                        else:
                            weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)
                    else:
                        weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, use_bf16, skip_bf16):
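        # Lazy-load path: read only the non-block (pre/post) weights from
        # non_block.safetensors; transformer block weights are presumably streamed on
        # demand by the block-level loaders.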
        lazy_load_model_path = self.dit_quantized_ckpt
        logger.info(f"Loading splited quant model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                if f.get_tensor(k).dtype == torch.float:
                    if use_bf16 or all(s not in k for s in skip_bf16):
                        pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(torch.bfloat16).to(self.device)
                    else:
                        pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)
                else:
                    pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)

        return pre_post_weight_dict

    def _load_gguf_ckpt(self):
        gguf_path = self.dit_quantized_ckpt
        logger.info(f"Loading gguf-quant dit model from {gguf_path}")
        if gguf is None:
            raise ImportError("The optional 'gguf' package is required to load GGUF-quantized checkpoints")
        reader = gguf.GGUFReader(gguf_path)
        weight_dict = {}
        for tensor in reader.tensors:
            # TODO: implement _load_gguf_ckpt (map/dequantize GGUF tensors into weight_dict)
            pass
        return weight_dict

    def _init_weights(self, weight_dict=None):
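        # Build the weight containers and populate them, either from a caller-provided
        # weight_dict or by loading a checkpoint (original, GGUF, quantized, or split).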
        use_bf16 = GET_DTYPE() == "BF16"
        # Layers matching these name patterns are kept in float32 for numerical accuracy
        skip_bf16 = {
            "norm",
            "embedding",
            "modulation",
            "time",
            "img_emb.proj.0",
            "img_emb.proj.4",
        }
        if weight_dict is None:
            if not self.dit_quantized or self.weight_auto_quant:
                self.original_weight_dict = self._load_ckpt(use_bf16, skip_bf16)
            elif self.config.get("use_gguf", False):
                self.original_weight_dict = self._load_gguf_ckpt()
            else:
                if not self.config.get("lazy_load", False):
                    self.original_weight_dict = self._load_quant_ckpt(use_bf16, skip_bf16)
                else:
                    self.original_weight_dict = self._load_quant_split_ckpt(use_bf16, skip_bf16)
        else:
            self.original_weight_dict = weight_dict
        # init weights
        self.pre_weight = self.pre_weight_class(self.config)
        self.post_weight = self.post_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)
        # load weights
        self.pre_weight.load(self.original_weight_dict)
        self.post_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

    def _init_infer(self):
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)
        if self.config["cfg_parallel"]:
            self.infer_func = self.infer_with_cfg_parallel
        else:
            self.infer_func = self.infer_wo_cfg_parallel

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.post_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.post_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
        return self.infer_func(inputs)

    @torch.no_grad()
    def infer_wo_cfg_parallel(self, inputs):
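        # Inference without CFG parallelism: run the conditional pass; if enable_cfg is
        # set, also run the unconditional pass and combine the two predictions with the
        # guidance scale.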
        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == 0:
                self.to_cuda()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cuda()
                self.post_weight.to_cuda()

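        # Lazily build the attention mask map; video_token_num assumes a 2x2 spatial
        # patchify of the latent grid, i.e. (h // 2) * (w // 2) tokens per latent frame.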
        if self.transformer_infer.mask_map is None:
            _, c, h, w = self.scheduler.latents.shape
            video_token_num = c * (h // 2) * (w // 2)
            self.transformer_infer.mask_map = MaskMap(video_token_num, c)

        embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=True)
        x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
        noise_pred_cond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        self.scheduler.noise_pred = noise_pred_cond

        if self.clean_cuda_cache:
            del x, embed, pre_infer_out, noise_pred_cond, grid_sizes
            torch.cuda.empty_cache()

        if self.config["enable_cfg"]:
            embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=False)
            x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
            noise_pred_uncond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

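            # Classifier-free guidance combine: uncond + guide_scale * (cond - uncond)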
            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (self.scheduler.noise_pred - noise_pred_uncond)

            if self.clean_cuda_cache:
                del x, embed, pre_infer_out, noise_pred_uncond, grid_sizes
                torch.cuda.empty_cache()

        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == self.scheduler.infer_steps - 1:
                self.to_cpu()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cpu()
                self.post_weight.to_cpu()

    @torch.no_grad()
    def infer_with_cfg_parallel(self, inputs):
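        # CFG parallelism: rank 0 of the cfg_p group computes the conditional prediction
        # and rank 1 the unconditional one; the two are exchanged via all_gather and
        # combined with the guidance scale.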
        assert self.config["enable_cfg"], "enable_cfg must be True"
        cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
        assert dist.get_world_size(cfg_p_group) == 2, "cfg_p_world_size must be equal to 2"
        cfg_p_rank = dist.get_rank(cfg_p_group)

        if cfg_p_rank == 0:
            embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=True)
            x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
            noise_pred = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]
        else:
            embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=False)
            x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
            noise_pred = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        noise_pred_list = [torch.zeros_like(noise_pred) for _ in range(2)]
        dist.all_gather(noise_pred_list, noise_pred, group=cfg_p_group)

        noise_pred_cond = noise_pred_list[0]  # cfg_p_rank == 0
        noise_pred_uncond = noise_pred_list[1]  # cfg_p_rank == 1
        self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (noise_pred_cond - noise_pred_uncond)


class Wan22MoeModel(WanModel):
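    """Wan 2.2 MoE variant of WanModel.

    Overrides checkpoint loading to read safetensors shards directly from model_path,
    and uses a simplified infer() without mask-map setup or CUDA-cache cleanup.
    """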
    def _load_ckpt(self, use_bf16, skip_bf16):
        safetensors_files = glob.glob(os.path.join(self.model_path, "*.safetensors"))
        weight_dict = {}
        for file_path in safetensors_files:
            file_weights = self._load_safetensor_to_dict(file_path, use_bf16, skip_bf16)
            weight_dict.update(file_weights)
        return weight_dict

    @torch.no_grad()
    def infer(self, inputs):
        if self.cpu_offload and self.offload_granularity != "model":
            self.pre_weight.to_cuda()
            self.post_weight.to_cuda()

        embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=True)
        x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
        noise_pred_cond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

        self.scheduler.noise_pred = noise_pred_cond

        if self.config["enable_cfg"]:
            embed, grid_sizes, pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs, positive=False)
            x = self.transformer_infer.infer(self.transformer_weights, grid_sizes, embed, *pre_infer_out)
            noise_pred_uncond = self.post_infer.infer(self.post_weight, x, embed, grid_sizes)[0]

            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (self.scheduler.noise_pred - noise_pred_uncond)

        if self.cpu_offload and self.offload_granularity != "model":
            self.pre_weight.to_cpu()
            self.post_weight.to_cpu()