import glob
import json
import os

import torch
import torch.distributed as dist
import torch.nn.functional as F
from loguru import logger
from safetensors import safe_open

from lightx2v.common.ops.attn import MaskMap
from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
    WanTransformerInferDualBlock,
    WanTransformerInferDynamicBlock,
    WanTransformerInferFirstBlock,
    WanTransformerInferMagCaching,
    WanTransformerInferTaylorCaching,
    WanTransformerInferTeaCaching,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.weights.post_weights import WanPostWeights
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.envs import *
from lightx2v.utils.utils import *

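# gguf is an optional dependency; it is only needed to read GGUF-quantized checkpoints.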
try:
    import gguf
except ImportError:
    gguf = None


class WanModel:
    pre_weight_class = WanPreWeights
    post_weight_class = WanPostWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        self.model_path = model_path
        self.config = config
        self.cpu_offload = self.config.get("cpu_offload", False)
        self.offload_granularity = self.config.get("offload_granularity", "block")

        if self.config["seq_parallel"]:
            self.seq_p_group = self.config.get("device_mesh").get_group(mesh_dim="seq_p")
        else:
            self.seq_p_group = None

        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)
        self.dit_quantized = self.config.mm_config.get("mm_type", "Default") != "Default"

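        # A non-default mm_type implies quantized matmuls; the quant scheme name is the
        # second "-"-separated field of mm_type.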
        if self.dit_quantized:
            dit_quant_scheme = self.config.mm_config.get("mm_type").split("-")[1]
            if self.config.model_cls == "wan2.1_distill":
                dit_quant_scheme = "distill_" + dit_quant_scheme
            if dit_quant_scheme == "gguf":
                self.dit_quantized_ckpt = find_gguf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
                self.config.use_gguf = True
            else:
                self.dit_quantized_ckpt = find_hf_model_path(config, self.model_path, "dit_quantized_ckpt", subdir=dit_quant_scheme)
            quant_config_path = os.path.join(self.dit_quantized_ckpt, "config.json")
            if os.path.exists(quant_config_path):
                with open(quant_config_path, "r") as f:
                    quant_model_config = json.load(f)
                self.config.update(quant_model_config)
        else:
            self.dit_quantized_ckpt = None
            assert not self.config.get("lazy_load", False), "lazy_load is only supported with quantized checkpoints"

        self.config.dit_quantized_ckpt = self.dit_quantized_ckpt

        self.weight_auto_quant = self.config.mm_config.get("weight_auto_quant", False)
        if self.dit_quantized:
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None, "quantized inference needs weight_auto_quant or a pre-quantized checkpoint"

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()

    def _init_infer_class(self):
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer

        if self.config["feature_caching"] == "NoCaching":
            self.transformer_infer_class = WanTransformerInfer
        elif self.config["feature_caching"] == "Tea":
            self.transformer_infer_class = WanTransformerInferTeaCaching
        elif self.config["feature_caching"] == "TaylorSeer":
            self.transformer_infer_class = WanTransformerInferTaylorCaching
        elif self.config["feature_caching"] == "Ada":
            self.transformer_infer_class = WanTransformerInferAdaCaching
        elif self.config["feature_caching"] == "Custom":
            self.transformer_infer_class = WanTransformerInferCustomCaching
        elif self.config["feature_caching"] == "FirstBlock":
            self.transformer_infer_class = WanTransformerInferFirstBlock
        elif self.config["feature_caching"] == "DualBlock":
            self.transformer_infer_class = WanTransformerInferDualBlock
        elif self.config["feature_caching"] == "DynamicBlock":
            self.transformer_infer_class = WanTransformerInferDynamicBlock
        elif self.config["feature_caching"] == "Mag":
            self.transformer_infer_class = WanTransformerInferMagCaching
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _should_load_weights(self):
        """Determine if current rank should load weights from disk."""
        if self.config.get("device_mesh") is None:
            # Single GPU mode
            return True
        elif dist.is_initialized():
            # Multi-GPU mode, only rank 0 loads
            if dist.get_rank() == 0:
                logger.info(f"Loading weights from {self.model_path}")
                return True
        return False

    def _load_safetensor_to_dict(self, file_path, unified_dtype, sensitive_layer):
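        # Floating-point weights are cast to GET_DTYPE(); keys matching a sensitive-layer
        # substring stay at GET_SENSITIVE_DTYPE() unless the two dtypes are identical anyway.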
        with safe_open(file_path, framework="pt") as f:
            return {
                key: (f.get_tensor(key).to(GET_DTYPE()) if unified_dtype or all(s not in key for s in sensitive_layer) else f.get_tensor(key).to(GET_SENSITIVE_DTYPE())).pin_memory().to(self.device)
                for key in f.keys()
            }

    def _load_ckpt(self, unified_dtype, sensitive_layer):
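        # Load every *.safetensors shard from the original (unquantized) checkpoint directory.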
        safetensors_path = find_hf_model_path(self.config, self.model_path, "dit_original_ckpt", subdir="original")
        safetensors_files = glob.glob(os.path.join(safetensors_path, "*.safetensors"))
        weight_dict = {}
        for file_path in safetensors_files:
            file_weights = self._load_safetensor_to_dict(file_path, unified_dtype, sensitive_layer)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, unified_dtype, sensitive_layer):
        ckpt_path = self.dit_quantized_ckpt
        logger.info(f"Loading quant dit model from {ckpt_path}")

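        # Sharded checkpoints ship a *.index.json mapping each weight name to its shard file.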
        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f" Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    tensor = f.get_tensor(k)
                    if tensor.dtype in (torch.float16, torch.bfloat16, torch.float32):
                        # Float weights are cast to the run dtype; sensitive layers keep higher precision.
                        if unified_dtype or all(s not in k for s in sensitive_layer):
                            weight_dict[k] = tensor.pin_memory().to(GET_DTYPE()).to(self.device)
                        else:
                            weight_dict[k] = tensor.pin_memory().to(GET_SENSITIVE_DTYPE()).to(self.device)
                    else:
                        # Non-float tensors (e.g. packed int quantized weights) move over unchanged.
                        weight_dict[k] = tensor.pin_memory().to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, unified_dtype, sensitive_layer):
        lazy_load_model_path = self.dit_quantized_ckpt
        logger.info(f"Loading splited quant model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                tensor = f.get_tensor(k)
                if tensor.dtype in (torch.float16, torch.bfloat16, torch.float32):
                    if unified_dtype or all(s not in k for s in sensitive_layer):
                        pre_post_weight_dict[k] = tensor.pin_memory().to(GET_DTYPE()).to(self.device)
                    else:
                        pre_post_weight_dict[k] = tensor.pin_memory().to(GET_SENSITIVE_DTYPE()).to(self.device)
                else:
                    pre_post_weight_dict[k] = tensor.pin_memory().to(self.device)

        return pre_post_weight_dict

    def _load_gguf_ckpt(self):
        if gguf is None:
            raise ImportError("the gguf package is required to load GGUF-quantized checkpoints")
        gguf_path = self.dit_quantized_ckpt
        logger.info(f"Loading gguf-quant dit model from {gguf_path}")
        reader = gguf.GGUFReader(gguf_path)
        for tensor in reader.tensors:
            # TODO: implement _load_gguf_ckpt
            pass

    def _init_weights(self, weight_dict=None):
        unified_dtype = GET_DTYPE() == GET_SENSITIVE_DTYPE()
        # Layers matching these substrings run at GET_SENSITIVE_DTYPE() (typically float32) for accuracy
        sensitive_layer = {
            "norm",
            "embedding",
            "modulation",
            "time",
            "img_emb.proj.0",
            "img_emb.proj.4",
        }

        if weight_dict is None:
            is_weight_loader = self._should_load_weights()
            if is_weight_loader:
                if not self.dit_quantized or self.weight_auto_quant:
                    # Load original weights
                    weight_dict = self._load_ckpt(unified_dtype, sensitive_layer)
                else:
                    # Load quantized weights
                    if not self.config.get("lazy_load", False):
                        weight_dict = self._load_quant_ckpt(unified_dtype, sensitive_layer)
                    else:
                        weight_dict = self._load_quant_split_ckpt(unified_dtype, sensitive_layer)

            if self.config.get("device_mesh") is not None:
                weight_dict = self._load_weights_distribute(weight_dict, is_weight_loader)

        self.original_weight_dict = weight_dict

        # Initialize weight containers
        self.pre_weight = self.pre_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)

        # Load weights into containers
        self.pre_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

    def _load_weights_distribute(self, weight_dict, is_weight_loader):
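        # Distribution strategy: the loader rank shares tensor metadata first, then every
        # tensor is broadcast so all ranks end up with a full copy on the target device.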
        global_src_rank = 0
        target_device = "cpu" if self.cpu_offload else "cuda"

        if is_weight_loader:
            meta_dict = {key: {"shape": tensor.shape, "dtype": tensor.dtype} for key, tensor in weight_dict.items()}
            obj_list = [meta_dict]
        else:
            obj_list = [None]
        dist.broadcast_object_list(obj_list, src=global_src_rank)
        synced_meta_dict = obj_list[0]

        distributed_weight_dict = {}
        for key, meta in synced_meta_dict.items():
            distributed_weight_dict[key] = torch.empty(meta["shape"], dtype=meta["dtype"], device=target_device)

        if target_device == "cuda":
            dist.barrier(device_ids=[torch.cuda.current_device()])

        # Stream each tensor from the loader rank to the others in a deterministic key order.
        for key in sorted(synced_meta_dict.keys()):
            if is_weight_loader:
                distributed_weight_dict[key].copy_(weight_dict[key], non_blocking=True)

            if target_device == "cpu":
                # NCCL broadcast requires CUDA tensors, so stage each weight through a
                # temporary GPU buffer before copying it back to host memory.
                if is_weight_loader:
                    gpu_tensor = distributed_weight_dict[key].cuda()
                else:
                    gpu_tensor = torch.empty_like(distributed_weight_dict[key], device="cuda")
                dist.broadcast(gpu_tensor, src=global_src_rank)
                distributed_weight_dict[key].copy_(gpu_tensor.cpu(), non_blocking=True)
                del gpu_tensor
                torch.cuda.empty_cache()
            else:
                dist.broadcast(distributed_weight_dict[key], src=global_src_rank)

        if target_device == "cuda":
            torch.cuda.synchronize()
        else:
            # Blocking self-copies on pinned tensors, apparently to ensure the
            # non-blocking host copies above have completed.
            for tensor in distributed_weight_dict.values():
                if tensor.is_pinned():
                    tensor.copy_(tensor, non_blocking=False)

        logger.info(f"Weights distributed across {dist.get_world_size()} devices on {target_device}")
        return distributed_weight_dict

    def _init_infer(self):
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
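        # With CPU offload enabled, move the needed weights to GPU for this step,
        # according to the configured offload granularity.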
        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == 0:
                self.to_cuda()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cuda()
                self.transformer_weights.post_weights_to_cuda()

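        # Build the attention mask map once from the latent shape; the token count
        # assumes a 2x2 spatial patchification of each latent frame.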
        if self.transformer_infer.mask_map is None:
            _, c, h, w = self.scheduler.latents.shape
            video_token_num = c * (h // 2) * (w // 2)
            self.transformer_infer.mask_map = MaskMap(video_token_num, c)

        if self.config["enable_cfg"]:
            if self.config["cfg_parallel"]:
                # ==================== CFG Parallel Processing ====================
                cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
                assert dist.get_world_size(cfg_p_group) == 2, "cfg_p_world_size must be equal to 2"
                cfg_p_rank = dist.get_rank(cfg_p_group)

                if cfg_p_rank == 0:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)
                else:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=False)

                noise_pred_list = [torch.zeros_like(noise_pred) for _ in range(2)]
                dist.all_gather(noise_pred_list, noise_pred, group=cfg_p_group)
                noise_pred_cond = noise_pred_list[0]  # cfg_p_rank == 0
                noise_pred_uncond = noise_pred_list[1]  # cfg_p_rank == 1
            else:
                # ==================== CFG Processing ====================
                noise_pred_cond = self._infer_cond_uncond(inputs, infer_condition=True)
                noise_pred_uncond = self._infer_cond_uncond(inputs, infer_condition=False)

            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (noise_pred_cond - noise_pred_uncond)
        else:
            # ==================== No CFG ====================
            self.scheduler.noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)

        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == self.scheduler.infer_steps - 1:
                self.to_cpu()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cpu()
                self.transformer_weights.post_weights_to_cpu()

    @torch.compile(disable=not CHECK_ENABLE_GRAPH_MODE())
    @torch.no_grad()
    def _infer_cond_uncond(self, inputs, infer_condition=True):
        self.scheduler.infer_condition = infer_condition

        pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs)

        if self.config["seq_parallel"]:
            pre_infer_out = self._seq_parallel_pre_process(pre_infer_out)

        x = self.transformer_infer.infer(self.transformer_weights, pre_infer_out)

        if self.config["seq_parallel"]:
            x = self._seq_parallel_post_process(x)

        noise_pred = self.post_infer.infer(x, pre_infer_out)[0]

        if self.clean_cuda_cache:
            del x, pre_infer_out
            torch.cuda.empty_cache()

        return noise_pred

    @torch.no_grad()
    def _seq_parallel_pre_process(self, pre_infer_out):
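        # Split the token sequence across the sequence-parallel group; each rank keeps one
        # contiguous chunk (padded so the length divides evenly).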
        x = pre_infer_out.x
        world_size = dist.get_world_size(self.seq_p_group)
        cur_rank = dist.get_rank(self.seq_p_group)

        padding_size = (world_size - (x.shape[0] % world_size)) % world_size
        if padding_size > 0:
            x = F.pad(x, (0, 0, 0, padding_size))

        pre_infer_out.x = torch.chunk(x, world_size, dim=0)[cur_rank]

        if self.config["model_cls"] == "wan2.2" and self.config["task"] == "i2v":
            embed, embed0 = pre_infer_out.embed, pre_infer_out.embed0

            padding_size = (world_size - (embed.shape[0] % world_size)) % world_size
            if padding_size > 0:
                embed = F.pad(embed, (0, 0, 0, padding_size))
                embed0 = F.pad(embed0, (0, 0, 0, 0, 0, padding_size))

            pre_infer_out.embed = torch.chunk(embed, world_size, dim=0)[cur_rank]
            pre_infer_out.embed0 = torch.chunk(embed0, world_size, dim=0)[cur_rank]

        return pre_infer_out

    @torch.no_grad()
    def _seq_parallel_post_process(self, x):
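        # All-gather the per-rank chunks and concatenate them back into the full sequence.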
        world_size = dist.get_world_size(self.seq_p_group)
        gathered_x = [torch.empty_like(x) for _ in range(world_size)]
        dist.all_gather(gathered_x, x, group=self.seq_p_group)
        combined_output = torch.cat(gathered_x, dim=0)
        return combined_output