import glob
import json
import os

import torch
import torch.distributed as dist
import torch.nn.functional as F
from loguru import logger
from safetensors import safe_open

from lightx2v.common.ops.attn import MaskMap
from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
    WanTransformerInferDualBlock,
    WanTransformerInferDynamicBlock,
    WanTransformerInferFirstBlock,
    WanTransformerInferMagCaching,
    WanTransformerInferTaylorCaching,
    WanTransformerInferTeaCaching,
)
from lightx2v.models.networks.wan.infer.offload.transformer_infer import (
    WanOffloadTransformerInfer,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.envs import *
from lightx2v.utils.utils import *

# gguf is optional; it is only needed when loading GGUF-quantized checkpoints
try:
    import gguf
except ImportError:
    gguf = None


class WanModel:
    pre_weight_class = WanPreWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
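        """Parse offload and quantization settings from config, resolve the DiT checkpoint path, then build infer classes, weights, and infer objects."""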
        self.model_path = model_path
        self.config = config
        self.cpu_offload = self.config.get("cpu_offload", False)
        self.offload_granularity = self.config.get("offload_granularity", "block")

        if self.config["seq_parallel"]:
            self.seq_p_group = self.config.get("device_mesh").get_group(mesh_dim="seq_p")
        else:
            self.seq_p_group = None

        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)
        self.dit_quantized = self.config.mm_config.get("mm_type", "Default") != "Default"

        if self.dit_quantized:
            dit_quant_scheme = self.config.mm_config.get("mm_type").split("-")[1]
            if self.config.model_cls == "wan2.1_distill":
                dit_quant_scheme = "distill_" + dit_quant_scheme
            if dit_quant_scheme == "gguf":
                self.dit_quantized_ckpt = find_gguf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
                self.config.use_gguf = True
            else:
                self.dit_quantized_ckpt = find_hf_model_path(
                    config,
                    self.model_path,
                    "dit_quantized_ckpt",
                    subdir=dit_quant_scheme,
                )
            quant_config_path = os.path.join(self.dit_quantized_ckpt, "config.json")
            if os.path.exists(quant_config_path):
                with open(quant_config_path, "r") as f:
                    quant_model_config = json.load(f)
                self.config.update(quant_model_config)
        else:
            self.dit_quantized_ckpt = None
            assert not self.config.get("lazy_load", False)

        self.weight_auto_quant = self.config.mm_config.get("weight_auto_quant", False)
        if self.dit_quantized:
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()

    def _init_infer_class(self):
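        """Select pre/post/transformer infer classes based on cpu_offload and the configured feature_caching mode."""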
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer

        if self.config["feature_caching"] == "NoCaching":
            self.transformer_infer_class = WanTransformerInfer if not self.cpu_offload else WanOffloadTransformerInfer
        elif self.config["feature_caching"] == "Tea":
            self.transformer_infer_class = WanTransformerInferTeaCaching
        elif self.config["feature_caching"] == "TaylorSeer":
            self.transformer_infer_class = WanTransformerInferTaylorCaching
        elif self.config["feature_caching"] == "Ada":
            self.transformer_infer_class = WanTransformerInferAdaCaching
        elif self.config["feature_caching"] == "Custom":
            self.transformer_infer_class = WanTransformerInferCustomCaching
        elif self.config["feature_caching"] == "FirstBlock":
            self.transformer_infer_class = WanTransformerInferFirstBlock
        elif self.config["feature_caching"] == "DualBlock":
            self.transformer_infer_class = WanTransformerInferDualBlock
        elif self.config["feature_caching"] == "DynamicBlock":
            self.transformer_infer_class = WanTransformerInferDynamicBlock
        elif self.config["feature_caching"] == "Mag":
            self.transformer_infer_class = WanTransformerInferMagCaching
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _should_load_weights(self):
        """Determine if current rank should load weights from disk."""
        if self.config.get("device_mesh") is None:
            # Single GPU mode
            return True
        elif dist.is_initialized():
            # Multi-GPU mode, only rank 0 loads
            if dist.get_rank() == 0:
                logger.info(f"Loading weights from {self.model_path}")
                return True
        return False

    def _load_safetensor_to_dict(self, file_path, unified_dtype, sensitive_layer):
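        """Load a single safetensors file into a dict of tensors on self.device, keeping sensitive layers in GET_SENSITIVE_DTYPE()."""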
        with safe_open(file_path, framework="pt") as f:
            return {
                key: (f.get_tensor(key).to(GET_DTYPE()) if unified_dtype or all(s not in key for s in sensitive_layer) else f.get_tensor(key).to(GET_SENSITIVE_DTYPE())).pin_memory().to(self.device)
                for key in f.keys()
            }

    def _load_ckpt(self, unified_dtype, sensitive_layer):
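        """Load the original (non-quantized) DiT weights from all safetensors files in the original checkpoint directory."""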
        safetensors_path = find_hf_model_path(self.config, self.model_path, "dit_original_ckpt", subdir="original")
        safetensors_files = glob.glob(os.path.join(safetensors_path, "*.safetensors"))
        weight_dict = {}
        for file_path in safetensors_files:
            file_weights = self._load_safetensor_to_dict(file_path, unified_dtype, sensitive_layer)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, unified_dtype, sensitive_layer):
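        """Load quantized DiT weights listed in the *.index.json safetensors index; floating-point tensors are cast per the sensitive_layer rules."""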
        ckpt_path = self.dit_quantized_ckpt
        logger.info(f"Loading quant dit model from {ckpt_path}")

        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f"Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    if f.get_tensor(k).dtype in [
                        torch.float16,
                        torch.bfloat16,
                        torch.float,
                    ]:
                        if unified_dtype or all(s not in k for s in sensitive_layer):
                            weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_DTYPE()).to(self.device)
                        else:
                            weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_SENSITIVE_DTYPE()).to(self.device)
                    else:
                        weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, unified_dtype, sensitive_layer):
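        """Load only the non-block weights of a split quantized checkpoint (used with lazy_load); block weights are expected to be loaded lazily elsewhere."""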
        lazy_load_model_path = self.dit_quantized_ckpt
        logger.info(f"Loading split quant model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                if f.get_tensor(k).dtype in [
                    torch.float16,
                    torch.bfloat16,
                    torch.float,
                ]:
                    if unified_dtype or all(s not in k for s in sensitive_layer):
                        pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_DTYPE()).to(self.device)
                    else:
                        pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(GET_SENSITIVE_DTYPE()).to(self.device)
                else:
                    pre_post_weight_dict[k] = f.get_tensor(k).pin_memory().to(self.device)

        return pre_post_weight_dict

    def _load_gguf_ckpt(self):
        gguf_path = self.dit_quantized_ckpt
        logger.info(f"Loading gguf-quant dit model from {gguf_path}")
        reader = gguf.GGUFReader(gguf_path)
        for tensor in reader.tensors:
            # TODO: implement _load_gguf_ckpt
            pass

    def _init_weights(self, weight_dict=None):
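        """Load (or receive) the raw weight dict, optionally distribute it across ranks, then populate the pre/transformer weight containers."""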
        unified_dtype = GET_DTYPE() == GET_SENSITIVE_DTYPE()
        # Some layers run with float32 to achieve high accuracy
        sensitive_layer = {
            "norm",
            "embedding",
            "modulation",
            "time",
            "img_emb.proj.0",
            "img_emb.proj.4",
            "before_proj",  # vace
            "after_proj",  # vace
        }

        if weight_dict is None:
            is_weight_loader = self._should_load_weights()
            if is_weight_loader:
                if not self.dit_quantized or self.weight_auto_quant:
                    # Load original weights
                    weight_dict = self._load_ckpt(unified_dtype, sensitive_layer)
                else:
                    # Load quantized weights
                    if not self.config.get("lazy_load", False):
                        weight_dict = self._load_quant_ckpt(unified_dtype, sensitive_layer)
                    else:
                        weight_dict = self._load_quant_split_ckpt(unified_dtype, sensitive_layer)

            if self.config.get("device_mesh") is not None:
                weight_dict = self._load_weights_distribute(weight_dict, is_weight_loader)

            self.original_weight_dict = weight_dict
        else:
            self.original_weight_dict = weight_dict

        # Initialize weight containers
        self.pre_weight = self.pre_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)

        # Load weights into containers
        self.pre_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

    def _load_weights_distribute(self, weight_dict, is_weight_loader):
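        """Broadcast weights from the loader rank to all ranks: metadata first, then each tensor, staging through GPU buffers when the target device is CPU."""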
        global_src_rank = 0
        target_device = "cpu" if self.cpu_offload else "cuda"

        if is_weight_loader:
            meta_dict = {}
            for key, tensor in weight_dict.items():
                meta_dict[key] = {"shape": tensor.shape, "dtype": tensor.dtype}

            obj_list = [meta_dict]
            dist.broadcast_object_list(obj_list, src=global_src_rank)
            synced_meta_dict = obj_list[0]
        else:
            obj_list = [None]
            dist.broadcast_object_list(obj_list, src=global_src_rank)
            synced_meta_dict = obj_list[0]

        distributed_weight_dict = {}
        for key, meta in synced_meta_dict.items():
            distributed_weight_dict[key] = torch.empty(meta["shape"], dtype=meta["dtype"], device=target_device)

        if target_device == "cuda":
            dist.barrier(device_ids=[torch.cuda.current_device()])

        for key in sorted(synced_meta_dict.keys()):
            if is_weight_loader:
                distributed_weight_dict[key].copy_(weight_dict[key], non_blocking=True)

            if target_device == "cpu":
                if is_weight_loader:
                    gpu_tensor = distributed_weight_dict[key].cuda()
                    dist.broadcast(gpu_tensor, src=global_src_rank)
                    distributed_weight_dict[key].copy_(gpu_tensor.cpu(), non_blocking=True)
                    del gpu_tensor
                    torch.cuda.empty_cache()
                else:
                    gpu_tensor = torch.empty_like(distributed_weight_dict[key], device="cuda")
                    dist.broadcast(gpu_tensor, src=global_src_rank)
                    distributed_weight_dict[key].copy_(gpu_tensor.cpu(), non_blocking=True)
                    del gpu_tensor
                    torch.cuda.empty_cache()

                if distributed_weight_dict[key].is_pinned():
                    distributed_weight_dict[key].copy_(distributed_weight_dict[key], non_blocking=True)
            else:
                dist.broadcast(distributed_weight_dict[key], src=global_src_rank)

        if target_device == "cuda":
            torch.cuda.synchronize()
        else:
            for tensor in distributed_weight_dict.values():
                if tensor.is_pinned():
                    tensor.copy_(tensor, non_blocking=False)

        logger.info(f"Weights distributed across {dist.get_world_size()} devices on {target_device}")
        return distributed_weight_dict

    def _init_infer(self):
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
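        """Run one denoising step: handle offload moves, run the conditional/unconditional passes (optionally CFG-parallel), and write noise_pred to the scheduler."""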
        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == 0:
                self.to_cuda()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cuda()
                self.transformer_weights.non_block_weights_to_cuda()

        if self.transformer_infer.mask_map is None:
            _, c, h, w = self.scheduler.latents.shape
            video_token_num = c * (h // 2) * (w // 2)
            self.transformer_infer.mask_map = MaskMap(video_token_num, c)

        if self.config["enable_cfg"]:
            if self.config["cfg_parallel"]:
                # ==================== CFG Parallel Processing ====================
                cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
                assert dist.get_world_size(cfg_p_group) == 2, "cfg_p_world_size must be equal to 2"
                cfg_p_rank = dist.get_rank(cfg_p_group)

                if cfg_p_rank == 0:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)
                else:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=False)

                noise_pred_list = [torch.zeros_like(noise_pred) for _ in range(2)]
                dist.all_gather(noise_pred_list, noise_pred, group=cfg_p_group)
                noise_pred_cond = noise_pred_list[0]  # cfg_p_rank == 0
                noise_pred_uncond = noise_pred_list[1]  # cfg_p_rank == 1
            else:
                # ==================== CFG Processing ====================
                noise_pred_cond = self._infer_cond_uncond(inputs, infer_condition=True)
                noise_pred_uncond = self._infer_cond_uncond(inputs, infer_condition=False)

            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (noise_pred_cond - noise_pred_uncond)
        else:
            # ==================== No CFG ====================
            self.scheduler.noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)

        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == self.scheduler.infer_steps - 1:
                self.to_cpu()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cpu()
                self.transformer_weights.non_block_weights_to_cpu()

    @torch.compile(disable=not CHECK_ENABLE_GRAPH_MODE())
    @torch.no_grad()
    def _infer_cond_uncond(self, inputs, infer_condition=True):
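        """Run pre-infer, transformer blocks, and post-infer for a single conditional or unconditional pass, with optional sequence parallelism."""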
        self.scheduler.infer_condition = infer_condition

        pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs)

        if self.config["seq_parallel"]:
            pre_infer_out = self._seq_parallel_pre_process(pre_infer_out)

        x = self.transformer_infer.infer(self.transformer_weights, pre_infer_out)

        if self.config["seq_parallel"]:
            x = self._seq_parallel_post_process(x)

        noise_pred = self.post_infer.infer(x, pre_infer_out)[0]

        if self.clean_cuda_cache:
            del x, pre_infer_out
            torch.cuda.empty_cache()

        return noise_pred

    @torch.no_grad()
    def _seq_parallel_pre_process(self, pre_infer_out):
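        """Pad and split pre-infer outputs along the sequence dimension so each rank in the seq_p group processes its own chunk."""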
        x = pre_infer_out.x
        world_size = dist.get_world_size(self.seq_p_group)
        cur_rank = dist.get_rank(self.seq_p_group)

        padding_size = (world_size - (x.shape[0] % world_size)) % world_size
        if padding_size > 0:
            x = F.pad(x, (0, 0, 0, padding_size))

        pre_infer_out.x = torch.chunk(x, world_size, dim=0)[cur_rank]

        if self.config["model_cls"] in ["wan2.2", "wan2.2_audio"] and self.config["task"] == "i2v":
            embed, embed0 = pre_infer_out.embed, pre_infer_out.embed0

            padding_size = (world_size - (embed.shape[0] % world_size)) % world_size
            if padding_size > 0:
                embed = F.pad(embed, (0, 0, 0, padding_size))
                embed0 = F.pad(embed0, (0, 0, 0, 0, 0, padding_size))

            pre_infer_out.embed = torch.chunk(embed, world_size, dim=0)[cur_rank]
            pre_infer_out.embed0 = torch.chunk(embed0, world_size, dim=0)[cur_rank]

        return pre_infer_out

    @torch.no_grad()
    def _seq_parallel_post_process(self, x):
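        """All-gather per-rank chunks from the seq_p group and concatenate them back into the full sequence."""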
        world_size = dist.get_world_size(self.seq_p_group)
        gathered_x = [torch.empty_like(x) for _ in range(world_size)]
        dist.all_gather(gathered_x, x, group=self.seq_p_group)
        combined_output = torch.cat(gathered_x, dim=0)
        return combined_output