import gc
import glob
import json
import os

import torch
import torch.distributed as dist
import torch.nn.functional as F
from loguru import logger
from safetensors import safe_open

from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
    WanTransformerInferDualBlock,
    WanTransformerInferDynamicBlock,
    WanTransformerInferFirstBlock,
    WanTransformerInferMagCaching,
    WanTransformerInferTaylorCaching,
    WanTransformerInferTeaCaching,
)
from lightx2v.models.networks.wan.infer.offload.transformer_infer import (
    WanOffloadTransformerInfer,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.custom_compiler import CompiledMethodsMixin, compiled_method
from lightx2v.utils.envs import *
from lightx2v.utils.utils import *

try:
    import gguf
except ImportError:
    gguf = None


class WanModel(CompiledMethodsMixin):
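    """Wan DiT model wrapper.

    Loads (optionally quantized) weights, selects the transformer inference
    implementation (offload / feature-caching variants), and computes one
    denoising step per `infer` call, with optional CFG and sequence parallelism.
    """
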
    pre_weight_class = WanPreWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        super().__init__()
        self.model_path = model_path
        self.config = config
        self.cpu_offload = self.config.get("cpu_offload", False)
        self.offload_granularity = self.config.get("offload_granularity", "block")

        if self.config["seq_parallel"]:
            self.seq_p_group = self.config.get("device_mesh").get_group(mesh_dim="seq_p")
        else:
            self.seq_p_group = None

        # When LoRA configs are present, defer applying weights until the adapters are merged
        self.init_empty_model = bool(self.config.get("lora_configs"))

        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)
        self.dit_quantized = self.config["mm_config"].get("mm_type", "Default") != "Default"

        if self.dit_quantized:
            dit_quant_scheme = self.config["mm_config"].get("mm_type").split("-")[1]
            if self.config["model_cls"] == "wan2.1_distill":
                dit_quant_scheme = "distill_" + dit_quant_scheme

            if dit_quant_scheme == "gguf":
                self.dit_quantized_ckpt = find_gguf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
                self.config["use_gguf"] = True
            else:
                self.dit_quantized_ckpt = find_hf_model_path(
                    config,
                    self.model_path,
                    "dit_quantized_ckpt",
                    subdir=dit_quant_scheme,
                )
            quant_config_path = os.path.join(self.dit_quantized_ckpt, "config.json")
            if os.path.exists(quant_config_path):
                with open(quant_config_path, "r") as f:
                    quant_model_config = json.load(f)
                self.config.update(quant_model_config)
        else:
            self.dit_quantized_ckpt = None
            # lazy_load is only supported together with a quantized checkpoint
            assert not self.config.get("lazy_load", False)

        self.weight_auto_quant = self.config["mm_config"].get("weight_auto_quant", False)
        if self.dit_quantized:
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()

    def _init_infer_class(self):
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer

        if self.config["feature_caching"] == "NoCaching":
            self.transformer_infer_class = WanTransformerInfer if not self.cpu_offload else WanOffloadTransformerInfer
        elif self.config["feature_caching"] == "Tea":
            self.transformer_infer_class = WanTransformerInferTeaCaching
        elif self.config["feature_caching"] == "TaylorSeer":
            self.transformer_infer_class = WanTransformerInferTaylorCaching
        elif self.config["feature_caching"] == "Ada":
            self.transformer_infer_class = WanTransformerInferAdaCaching
        elif self.config["feature_caching"] == "Custom":
            self.transformer_infer_class = WanTransformerInferCustomCaching
        elif self.config["feature_caching"] == "FirstBlock":
            self.transformer_infer_class = WanTransformerInferFirstBlock
        elif self.config["feature_caching"] == "DualBlock":
            self.transformer_infer_class = WanTransformerInferDualBlock
        elif self.config["feature_caching"] == "DynamicBlock":
            self.transformer_infer_class = WanTransformerInferDynamicBlock
        elif self.config["feature_caching"] == "Mag":
            self.transformer_infer_class = WanTransformerInferMagCaching
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _should_load_weights(self):
        """Determine if current rank should load weights from disk."""
        if self.config.get("device_mesh") is None:
            # Single GPU mode
            return True
        elif dist.is_initialized():
            if self.config.get("load_from_rank0", False):
                # Multi-GPU mode, only rank 0 loads
                if dist.get_rank() == 0:
                    logger.info(f"Loading weights from {self.model_path}")
                    return True
            else:
                # Multi-GPU mode without load_from_rank0: every rank loads its own copy
                return True
        return False

    def _load_safetensor_to_dict(self, file_path, unified_dtype, sensitive_layer):
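        """Load one safetensors file into a dict, casting tensors to GET_DTYPE()
        except for sensitive layers, which keep GET_SENSITIVE_DTYPE(); keys
        matching `remove_keys` are skipped."""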
        remove_keys = getattr(self, "remove_keys", [])

        if self.device.type == "cuda" and dist.is_initialized():
            device = torch.device("cuda:{}".format(dist.get_rank()))
        else:
            device = self.device

        with safe_open(file_path, framework="pt", device=str(device)) as f:
            return {
                key: (
                    f.get_tensor(key).to(GET_DTYPE())
                    if unified_dtype or all(s not in key for s in sensitive_layer)
                    else f.get_tensor(key).to(GET_SENSITIVE_DTYPE())
                )
                for key in f.keys()
                if not any(remove_key in key for remove_key in remove_keys)
            }

    def _load_ckpt(self, unified_dtype, sensitive_layer):
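        """Load the original (unquantized) checkpoint from every *.safetensors
        file, skipping the adapter file if `adapter_model_path` names one."""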
        safetensors_path = find_hf_model_path(self.config, self.model_path, "dit_original_ckpt", subdir="original")
        safetensors_files = glob.glob(os.path.join(safetensors_path, "*.safetensors"))

        weight_dict = {}
        for file_path in safetensors_files:
            if self.config.get("adapter_model_path", None) is not None:
                if self.config["adapter_model_path"] == file_path:
                    continue
            file_weights = self._load_safetensor_to_dict(file_path, unified_dtype, sensitive_layer)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, unified_dtype, sensitive_layer):
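        """Load a quantized checkpoint by walking its *.index.json weight map."""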
        remove_keys = getattr(self, "remove_keys", [])
        ckpt_path = self.dit_quantized_ckpt
        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f"Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    if any(remove_key in k for remove_key in remove_keys):
                        continue

                    # Read each tensor once; calling get_tensor repeatedly would reload it from disk
                    tensor = f.get_tensor(k)
                    if tensor.dtype in (torch.float16, torch.bfloat16, torch.float):
                        if unified_dtype or all(s not in k for s in sensitive_layer):
                            weight_dict[k] = tensor.to(GET_DTYPE()).to(self.device)
                        else:
                            weight_dict[k] = tensor.to(GET_SENSITIVE_DTYPE()).to(self.device)
                    else:
                        weight_dict[k] = tensor.to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, unified_dtype, sensitive_layer):
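        """Load only the non-block weights of a split quantized checkpoint;
        transformer block weights stay on disk for lazy loading."""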
        lazy_load_model_path = self.dit_quantized_ckpt
        logger.info(f"Loading split quant model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                # Read each tensor once; calling get_tensor repeatedly would reload it from disk
                tensor = f.get_tensor(k)
                if tensor.dtype in (torch.float16, torch.bfloat16, torch.float):
                    if unified_dtype or all(s not in k for s in sensitive_layer):
                        pre_post_weight_dict[k] = tensor.to(GET_DTYPE()).to(self.device)
                    else:
                        pre_post_weight_dict[k] = tensor.to(GET_SENSITIVE_DTYPE()).to(self.device)
                else:
                    pre_post_weight_dict[k] = tensor.to(self.device)

        return pre_post_weight_dict

    def _load_gguf_ckpt(self):
        gguf_path = self.dit_quantized_ckpt
        logger.info(f"Loading gguf-quant dit model from {gguf_path}")
        reader = gguf.GGUFReader(gguf_path)
        for tensor in reader.tensors:
            # TODO: implement _load_gguf_ckpt
            pass

    def _init_weights(self, weight_dict=None):
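        """Create the weight containers and populate them, either from the given
        `weight_dict` or by loading from disk (rank 0 only when load_from_rank0)."""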
        unified_dtype = GET_DTYPE() == GET_SENSITIVE_DTYPE()
        # Layers matching these patterns run at GET_SENSITIVE_DTYPE() (typically float32) for accuracy
        sensitive_layer = {
            "norm",
            "embedding",
            "modulation",
            "time",
            "img_emb.proj.0",
            "img_emb.proj.4",
            "before_proj",  # vace
            "after_proj",  # vace
        }

        if weight_dict is None:
            is_weight_loader = self._should_load_weights()
            if is_weight_loader:
                if not self.dit_quantized or self.weight_auto_quant:
                    # Load original weights
                    weight_dict = self._load_ckpt(unified_dtype, sensitive_layer)
                else:
                    # Load quantized weights
                    if not self.config.get("lazy_load", False):
                        weight_dict = self._load_quant_ckpt(unified_dtype, sensitive_layer)
                    else:
                        weight_dict = self._load_quant_split_ckpt(unified_dtype, sensitive_layer)

            if self.config.get("device_mesh") is not None and self.config.get("load_from_rank0", False):
                weight_dict = self._load_weights_from_rank0(weight_dict, is_weight_loader)

            if hasattr(self, "adapter_weights_dict"):
                weight_dict.update(self.adapter_weights_dict)

            self.original_weight_dict = weight_dict
        else:
            self.original_weight_dict = weight_dict

        # Initialize weight containers
        self.pre_weight = self.pre_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)
        if not self.init_empty_model:
            self._apply_weights()

    def _apply_weights(self, weight_dict=None):
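        """Load the raw state dict into the pre/transformer weight containers,
        then release it to reclaim memory."""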
        if weight_dict is not None:
            self.original_weight_dict = weight_dict
            del weight_dict
            gc.collect()
        # Load weights into containers
        self.pre_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

        del self.original_weight_dict
        torch.cuda.empty_cache()
        gc.collect()

    def _load_weights_from_rank0(self, weight_dict, is_weight_loader):
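        """Broadcast weights from the loader rank (rank 0) to all other ranks:
        metadata first so receivers can pre-allocate buffers, then each tensor,
        staged through GPU memory when the final destination is CPU."""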
        logger.info("Loading distributed weights")
        global_src_rank = 0
        target_device = "cpu" if self.cpu_offload else "cuda"

        # Broadcast tensor metadata (shapes and dtypes) from rank 0 so every rank
        # can pre-allocate identical receive buffers
        if is_weight_loader:
            meta_dict = {key: {"shape": tensor.shape, "dtype": tensor.dtype} for key, tensor in weight_dict.items()}
            obj_list = [meta_dict]
        else:
            obj_list = [None]
        dist.broadcast_object_list(obj_list, src=global_src_rank)
        synced_meta_dict = obj_list[0]

        distributed_weight_dict = {}
        for key, meta in synced_meta_dict.items():
            distributed_weight_dict[key] = torch.empty(meta["shape"], dtype=meta["dtype"], device=target_device)

        if target_device == "cuda":
            dist.barrier(device_ids=[torch.cuda.current_device()])

        for key in sorted(synced_meta_dict.keys()):
            if is_weight_loader:
                distributed_weight_dict[key].copy_(weight_dict[key], non_blocking=True)

            if target_device == "cpu":
                # Stage the broadcast through a temporary GPU tensor, then copy back to host
                if is_weight_loader:
                    gpu_tensor = distributed_weight_dict[key].cuda()
                else:
                    gpu_tensor = torch.empty_like(distributed_weight_dict[key], device="cuda")
                dist.broadcast(gpu_tensor, src=global_src_rank)
                distributed_weight_dict[key].copy_(gpu_tensor.cpu(), non_blocking=True)
                del gpu_tensor
                torch.cuda.empty_cache()

                if distributed_weight_dict[key].is_pinned():
                    distributed_weight_dict[key].copy_(distributed_weight_dict[key], non_blocking=True)
            else:
                dist.broadcast(distributed_weight_dict[key], src=global_src_rank)

        if target_device == "cuda":
            torch.cuda.synchronize()
        else:
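            # The blocking self-copy below appears intended to flush earlier
            # non-blocking copies into pinned host memory before returning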
            for tensor in distributed_weight_dict.values():
                if tensor.is_pinned():
                    tensor.copy_(tensor, non_blocking=False)

        logger.info(f"Weights distributed across {dist.get_world_size()} devices on {target_device}")

        return distributed_weight_dict

    def _init_infer(self):
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
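        """Run one denoising step: move weights per the offload policy, compute
        the noise prediction (optionally with CFG or CFG-parallel across two
        ranks), and store it on the scheduler."""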
        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == 0 and "wan2.2_moe" not in self.config["model_cls"]:
                self.to_cuda()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cuda()
                self.transformer_weights.non_block_weights_to_cuda()

        if self.config["enable_cfg"]:
            if self.config["cfg_parallel"]:
                # ==================== CFG Parallel Processing ====================
                cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
                assert dist.get_world_size(cfg_p_group) == 2, "cfg_p_world_size must be equal to 2"
                cfg_p_rank = dist.get_rank(cfg_p_group)

                if cfg_p_rank == 0:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)
                else:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=False)

                noise_pred_list = [torch.zeros_like(noise_pred) for _ in range(2)]
                dist.all_gather(noise_pred_list, noise_pred, group=cfg_p_group)
                noise_pred_cond = noise_pred_list[0]  # cfg_p_rank == 0
                noise_pred_uncond = noise_pred_list[1]  # cfg_p_rank == 1
            else:
                # ==================== CFG Processing ====================
                noise_pred_cond = self._infer_cond_uncond(inputs, infer_condition=True)
                noise_pred_uncond = self._infer_cond_uncond(inputs, infer_condition=False)

            # Classifier-free guidance: uncond + guide_scale * (cond - uncond)
            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (noise_pred_cond - noise_pred_uncond)
        else:
            # ==================== No CFG ====================
            self.scheduler.noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)

        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == self.scheduler.infer_steps - 1 and "wan2.2_moe" not in self.config["model_cls"]:
                self.to_cpu()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cpu()
                self.transformer_weights.non_block_weights_to_cpu()

    @compiled_method()
    @torch.no_grad()
    def _infer_cond_uncond(self, inputs, infer_condition=True):
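        """Single forward pass for either the conditional or unconditional branch."""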
        self.scheduler.infer_condition = infer_condition

        pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs)

        if self.config["seq_parallel"]:
            pre_infer_out = self._seq_parallel_pre_process(pre_infer_out)

        x = self.transformer_infer.infer(self.transformer_weights, pre_infer_out)

        if self.config["seq_parallel"]:
            x = self._seq_parallel_post_process(x)

        noise_pred = self.post_infer.infer(x, pre_infer_out)[0]

        if self.clean_cuda_cache:
            del x, pre_infer_out
            torch.cuda.empty_cache()

        return noise_pred

    @torch.no_grad()
    def _seq_parallel_pre_process(self, pre_infer_out):
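        """Pad the sequence dim of x to a multiple of the seq-parallel world size
        and keep only this rank's chunk; wan2.2 i2v/s2v also shards embed/embed0."""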
        x = pre_infer_out.x
        world_size = dist.get_world_size(self.seq_p_group)
        cur_rank = dist.get_rank(self.seq_p_group)

        padding_size = (world_size - (x.shape[0] % world_size)) % world_size
        if padding_size > 0:
            x = F.pad(x, (0, 0, 0, padding_size))

        pre_infer_out.x = torch.chunk(x, world_size, dim=0)[cur_rank]

        if self.config["model_cls"] in ["wan2.2", "wan2.2_audio"] and self.config["task"] in ["i2v", "s2v"]:
            embed, embed0 = pre_infer_out.embed, pre_infer_out.embed0

            padding_size = (world_size - (embed.shape[0] % world_size)) % world_size
            if padding_size > 0:
                embed = F.pad(embed, (0, 0, 0, padding_size))
                embed0 = F.pad(embed0, (0, 0, 0, 0, 0, padding_size))

            pre_infer_out.embed = torch.chunk(embed, world_size, dim=0)[cur_rank]
            pre_infer_out.embed0 = torch.chunk(embed0, world_size, dim=0)[cur_rank]

        return pre_infer_out

    @torch.no_grad()
    def _seq_parallel_post_process(self, x):
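        """All-gather the per-rank chunks and concatenate along the sequence dim
        (padding added in pre-processing, if any, is still present here)."""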
        world_size = dist.get_world_size(self.seq_p_group)
        gathered_x = [torch.empty_like(x) for _ in range(world_size)]
        dist.all_gather(gathered_x, x, group=self.seq_p_group)
        combined_output = torch.cat(gathered_x, dim=0)
        return combined_output