# model.py
import gc
import glob
import json
import os

import torch
import torch.distributed as dist
import torch.nn.functional as F
from loguru import logger
from safetensors import safe_open

from lightx2v.models.networks.wan.infer.feature_caching.transformer_infer import (
    WanTransformerInferAdaCaching,
    WanTransformerInferCustomCaching,
    WanTransformerInferDualBlock,
    WanTransformerInferDynamicBlock,
    WanTransformerInferFirstBlock,
    WanTransformerInferMagCaching,
    WanTransformerInferTaylorCaching,
    WanTransformerInferTeaCaching,
)
from lightx2v.models.networks.wan.infer.offload.transformer_infer import (
    WanOffloadTransformerInfer,
)
from lightx2v.models.networks.wan.infer.post_infer import WanPostInfer
from lightx2v.models.networks.wan.infer.pre_infer import WanPreInfer
from lightx2v.models.networks.wan.infer.transformer_infer import (
    WanTransformerInfer,
)
from lightx2v.models.networks.wan.weights.pre_weights import WanPreWeights
from lightx2v.models.networks.wan.weights.transformer_weights import (
    WanTransformerWeights,
)
from lightx2v.utils.custom_compiler import CompiledMethodsMixin, compiled_method
from lightx2v.utils.envs import *
from lightx2v.utils.utils import *

try:
    import gguf
except ImportError:
    gguf = None


class WanModel(CompiledMethodsMixin):
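    """Wan DiT model wrapper.

    Loads original or quantized checkpoints (sharded safetensors or GGUF), optionally
    distributes weights across ranks via the configured device mesh, and dispatches
    inference to the configured pre/transformer/post infer classes, including the
    offload and feature-caching variants.
    """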
    pre_weight_class = WanPreWeights
    transformer_weight_class = WanTransformerWeights

    def __init__(self, model_path, config, device):
        super().__init__()
        self.model_path = model_path
        self.config = config
        self.cpu_offload = self.config.get("cpu_offload", False)
        self.offload_granularity = self.config.get("offload_granularity", "block")

        if self.config["seq_parallel"]:
            self.seq_p_group = self.config.get("device_mesh").get_group(mesh_dim="seq_p")
        else:
            self.seq_p_group = None

        self.clean_cuda_cache = self.config.get("clean_cuda_cache", False)
        self.dit_quantized = self.config.mm_config.get("mm_type", "Default") != "Default"

        if self.dit_quantized:
            dit_quant_scheme = self.config.mm_config.get("mm_type").split("-")[1]
            if self.config.model_cls == "wan2.1_distill":
                dit_quant_scheme = "distill_" + dit_quant_scheme
            if dit_quant_scheme == "gguf":
                self.dit_quantized_ckpt = find_gguf_model_path(config, "dit_quantized_ckpt", subdir=dit_quant_scheme)
                self.config.use_gguf = True
            else:
                self.dit_quantized_ckpt = find_hf_model_path(
                    config,
                    self.model_path,
                    "dit_quantized_ckpt",
                    subdir=dit_quant_scheme,
                )
            quant_config_path = os.path.join(self.dit_quantized_ckpt, "config.json")
            if os.path.exists(quant_config_path):
                with open(quant_config_path, "r") as f:
                    quant_model_config = json.load(f)
                self.config.update(quant_model_config)
        else:
            self.dit_quantized_ckpt = None
            assert not self.config.get("lazy_load", False)

        self.weight_auto_quant = self.config.mm_config.get("weight_auto_quant", False)
        if self.dit_quantized:
            assert self.weight_auto_quant or self.dit_quantized_ckpt is not None

        self.device = device
        self._init_infer_class()
        self._init_weights()
        self._init_infer()

    def _init_infer_class(self):
        self.pre_infer_class = WanPreInfer
        self.post_infer_class = WanPostInfer

        if self.config["feature_caching"] == "NoCaching":
            self.transformer_infer_class = WanTransformerInfer if not self.cpu_offload else WanOffloadTransformerInfer
        elif self.config["feature_caching"] == "Tea":
            self.transformer_infer_class = WanTransformerInferTeaCaching
        elif self.config["feature_caching"] == "TaylorSeer":
            self.transformer_infer_class = WanTransformerInferTaylorCaching
        elif self.config["feature_caching"] == "Ada":
            self.transformer_infer_class = WanTransformerInferAdaCaching
        elif self.config["feature_caching"] == "Custom":
            self.transformer_infer_class = WanTransformerInferCustomCaching
        elif self.config["feature_caching"] == "FirstBlock":
            self.transformer_infer_class = WanTransformerInferFirstBlock
        elif self.config["feature_caching"] == "DualBlock":
            self.transformer_infer_class = WanTransformerInferDualBlock
        elif self.config["feature_caching"] == "DynamicBlock":
            self.transformer_infer_class = WanTransformerInferDynamicBlock
        elif self.config["feature_caching"] == "Mag":
            self.transformer_infer_class = WanTransformerInferMagCaching
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {self.config['feature_caching']}")

    def _should_load_weights(self):
        """Determine if current rank should load weights from disk."""
        if self.config.get("device_mesh") is None:
            # Single GPU mode
            return True
        elif dist.is_initialized():
            # Multi-GPU mode, only rank 0 loads
            if dist.get_rank() == 0:
                logger.info(f"Loading weights from {self.model_path}")
                return True
        return False

    def _load_safetensor_to_dict(self, file_path, unified_dtype, sensitive_layer):
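        """Load one safetensors file, casting tensors to GET_DTYPE() except sensitive layers, which keep GET_SENSITIVE_DTYPE() when the dtypes differ."""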
        with safe_open(file_path, framework="pt", device=str(self.device)) as f:
            return {key: (f.get_tensor(key).to(GET_DTYPE()) if unified_dtype or all(s not in key for s in sensitive_layer) else f.get_tensor(key).to(GET_SENSITIVE_DTYPE())) for key in f.keys()}

    def _load_ckpt(self, unified_dtype, sensitive_layer):
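        """Load the original (non-quantized) safetensors shards, skipping the adapter shard when adapter_model_path points at it."""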
        safetensors_path = find_hf_model_path(self.config, self.model_path, "dit_original_ckpt", subdir="original")
        safetensors_files = glob.glob(os.path.join(safetensors_path, "*.safetensors"))

        weight_dict = {}
        for file_path in safetensors_files:
            if self.config.get("adapter_model_path", None) is not None:
                if self.config.adapter_model_path == file_path:
                    continue
            file_weights = self._load_safetensor_to_dict(file_path, unified_dtype, sensitive_layer)
            weight_dict.update(file_weights)
        return weight_dict

    def _load_quant_ckpt(self, unified_dtype, sensitive_layer):
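        """Load quantized DiT weights by following the *.index.json weight map and reading every referenced shard."""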
        ckpt_path = self.dit_quantized_ckpt
        logger.info(f"Loading quant dit model from {ckpt_path}")

        index_files = [f for f in os.listdir(ckpt_path) if f.endswith(".index.json")]
        if not index_files:
            raise FileNotFoundError(f"No *.index.json found in {ckpt_path}")

        index_path = os.path.join(ckpt_path, index_files[0])
        logger.info(f"Using safetensors index: {index_path}")

        with open(index_path, "r") as f:
            index_data = json.load(f)

        weight_dict = {}
        for filename in set(index_data["weight_map"].values()):
            safetensor_path = os.path.join(ckpt_path, filename)
            with safe_open(safetensor_path, framework="pt") as f:
                logger.info(f"Loading weights from {safetensor_path}")
                for k in f.keys():
                    if f.get_tensor(k).dtype in [
                        torch.float16,
                        torch.bfloat16,
                        torch.float,
                    ]:
                        if unified_dtype or all(s not in k for s in sensitive_layer):
                            weight_dict[k] = f.get_tensor(k).to(GET_DTYPE()).to(self.device)
                        else:
                            weight_dict[k] = f.get_tensor(k).to(GET_SENSITIVE_DTYPE()).to(self.device)
                    else:
                        weight_dict[k] = f.get_tensor(k).to(self.device)

        return weight_dict

    def _load_quant_split_ckpt(self, unified_dtype, sensitive_layer):
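        """Lazy-load path: eagerly load only the non-block weights from non_block.safetensors."""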
        lazy_load_model_path = self.dit_quantized_ckpt
        logger.info(f"Loading split quantized model from {lazy_load_model_path}")
        pre_post_weight_dict = {}

        safetensor_path = os.path.join(lazy_load_model_path, "non_block.safetensors")
        with safe_open(safetensor_path, framework="pt", device="cpu") as f:
            for k in f.keys():
                if f.get_tensor(k).dtype in [
                    torch.float16,
                    torch.bfloat16,
                    torch.float,
                ]:
                    if unified_dtype or all(s not in k for s in sensitive_layer):
                        pre_post_weight_dict[k] = f.get_tensor(k).to(GET_DTYPE()).to(self.device)
                    else:
                        pre_post_weight_dict[k] = f.get_tensor(k).to(GET_SENSITIVE_DTYPE()).to(self.device)
                else:
                    pre_post_weight_dict[k] = f.get_tensor(k).to(self.device)

        return pre_post_weight_dict

    def _load_gguf_ckpt(self):
        gguf_path = self.dit_quantized_ckpt
        logger.info(f"Loading gguf-quant dit model from {gguf_path}")
        reader = gguf.GGUFReader(gguf_path)
        for tensor in reader.tensors:
            # TODO: implement _load_gguf_ckpt
            pass

    def _init_weights(self, weight_dict=None):
        unified_dtype = GET_DTYPE() == GET_SENSITIVE_DTYPE()
        # Sensitive layers run at GET_SENSITIVE_DTYPE() (typically float32) for higher accuracy
        sensitive_layer = {
            "norm",
            "embedding",
            "modulation",
            "time",
            "img_emb.proj.0",
            "img_emb.proj.4",
            "before_proj",  # vace
            "after_proj",  # vace
        }

        if weight_dict is None:
            is_weight_loader = self._should_load_weights()
            if is_weight_loader:
                if not self.dit_quantized or self.weight_auto_quant:
                    # Load original weights
                    weight_dict = self._load_ckpt(unified_dtype, sensitive_layer)
                else:
                    # Load quantized weights
                    if not self.config.get("lazy_load", False):
                        weight_dict = self._load_quant_ckpt(unified_dtype, sensitive_layer)
                    else:
                        weight_dict = self._load_quant_split_ckpt(unified_dtype, sensitive_layer)

            if self.config.get("device_mesh") is not None:
                weight_dict = self._load_weights_distribute(weight_dict, is_weight_loader)

            if hasattr(self, "adapter_weights_dict"):
                weight_dict.update(self.adapter_weights_dict)

            self.original_weight_dict = weight_dict
        else:
            self.original_weight_dict = weight_dict

        # Initialize weight containers
        self.pre_weight = self.pre_weight_class(self.config)
        self.transformer_weights = self.transformer_weight_class(self.config)

        # Load weights into containers
        self.pre_weight.load(self.original_weight_dict)
        self.transformer_weights.load(self.original_weight_dict)

        del self.original_weight_dict
        torch.cuda.empty_cache()
        gc.collect()

    def _load_weights_distribute(self, weight_dict, is_weight_loader):
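        """Broadcast loaded weights from the loader rank to every other rank.

        Tensor metadata (shape/dtype) is synced with broadcast_object_list, receive
        buffers are allocated on the target device (CPU when cpu_offload is set,
        otherwise CUDA), and each tensor is broadcast in turn; CPU targets stage the
        transfer through a temporary CUDA tensor.
        """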
        global_src_rank = 0
        target_device = "cpu" if self.cpu_offload else "cuda"

        if is_weight_loader:
            meta_dict = {}
            for key, tensor in weight_dict.items():
                meta_dict[key] = {"shape": tensor.shape, "dtype": tensor.dtype}

            obj_list = [meta_dict]
            dist.broadcast_object_list(obj_list, src=global_src_rank)
            synced_meta_dict = obj_list[0]
        else:
            obj_list = [None]
            dist.broadcast_object_list(obj_list, src=global_src_rank)
            synced_meta_dict = obj_list[0]

        distributed_weight_dict = {}
        for key, meta in synced_meta_dict.items():
            distributed_weight_dict[key] = torch.empty(meta["shape"], dtype=meta["dtype"], device=target_device)

        if target_device == "cuda":
            dist.barrier(device_ids=[torch.cuda.current_device()])

        for key in sorted(synced_meta_dict.keys()):
            if is_weight_loader:
                distributed_weight_dict[key].copy_(weight_dict[key], non_blocking=True)

            if target_device == "cpu":
                if is_weight_loader:
                    gpu_tensor = distributed_weight_dict[key].cuda()
                    dist.broadcast(gpu_tensor, src=global_src_rank)
                    distributed_weight_dict[key].copy_(gpu_tensor.cpu(), non_blocking=True)
                    del gpu_tensor
                    torch.cuda.empty_cache()
                else:
                    gpu_tensor = torch.empty_like(distributed_weight_dict[key], device="cuda")
                    dist.broadcast(gpu_tensor, src=global_src_rank)
                    distributed_weight_dict[key].copy_(gpu_tensor.cpu(), non_blocking=True)
                    del gpu_tensor
                    torch.cuda.empty_cache()

                if distributed_weight_dict[key].is_pinned():
                    distributed_weight_dict[key].copy_(distributed_weight_dict[key], non_blocking=True)
            else:
                dist.broadcast(distributed_weight_dict[key], src=global_src_rank)

        if target_device == "cuda":
            torch.cuda.synchronize()
        else:
            for tensor in distributed_weight_dict.values():
                if tensor.is_pinned():
                    tensor.copy_(tensor, non_blocking=False)

        logger.info(f"Weights distributed across {dist.get_world_size()} devices on {target_device}")
        return distributed_weight_dict

    def _init_infer(self):
        self.pre_infer = self.pre_infer_class(self.config)
        self.post_infer = self.post_infer_class(self.config)
        self.transformer_infer = self.transformer_infer_class(self.config)

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler
        self.pre_infer.set_scheduler(scheduler)
        self.post_infer.set_scheduler(scheduler)
        self.transformer_infer.set_scheduler(scheduler)

    def to_cpu(self):
        self.pre_weight.to_cpu()
        self.transformer_weights.to_cpu()

    def to_cuda(self):
        self.pre_weight.to_cuda()
        self.transformer_weights.to_cuda()

    @torch.no_grad()
    def infer(self, inputs):
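        """Run one denoising step, handling CPU offload and (optionally parallel) classifier-free guidance."""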
        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == 0:
                self.to_cuda()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cuda()
                self.transformer_weights.non_block_weights_to_cuda()

        if self.config["enable_cfg"]:
            if self.config["cfg_parallel"]:
                # ==================== CFG Parallel Processing ====================
                cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
                assert dist.get_world_size(cfg_p_group) == 2, "cfg_p_world_size must be equal to 2"
                cfg_p_rank = dist.get_rank(cfg_p_group)

                if cfg_p_rank == 0:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)
                else:
                    noise_pred = self._infer_cond_uncond(inputs, infer_condition=False)

                noise_pred_list = [torch.zeros_like(noise_pred) for _ in range(2)]
                dist.all_gather(noise_pred_list, noise_pred, group=cfg_p_group)
                noise_pred_cond = noise_pred_list[0]  # cfg_p_rank == 0
                noise_pred_uncond = noise_pred_list[1]  # cfg_p_rank == 1
            else:
                # ==================== CFG Processing ====================
                noise_pred_cond = self._infer_cond_uncond(inputs, infer_condition=True)
                noise_pred_uncond = self._infer_cond_uncond(inputs, infer_condition=False)

            self.scheduler.noise_pred = noise_pred_uncond + self.scheduler.sample_guide_scale * (noise_pred_cond - noise_pred_uncond)
        else:
            # ==================== No CFG ====================
            self.scheduler.noise_pred = self._infer_cond_uncond(inputs, infer_condition=True)

        if self.cpu_offload:
            if self.offload_granularity == "model" and self.scheduler.step_index == self.scheduler.infer_steps - 1:
                self.to_cpu()
            elif self.offload_granularity != "model":
                self.pre_weight.to_cpu()
                self.transformer_weights.non_block_weights_to_cpu()

    @compiled_method()
    @torch.no_grad()
    def _infer_cond_uncond(self, inputs, infer_condition=True):
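        """Run a single pre-infer -> transformer -> post-infer pass for the conditional or unconditional branch."""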
        self.scheduler.infer_condition = infer_condition

        pre_infer_out = self.pre_infer.infer(self.pre_weight, inputs)

        if self.config["seq_parallel"]:
            pre_infer_out = self._seq_parallel_pre_process(pre_infer_out)

        x = self.transformer_infer.infer(self.transformer_weights, pre_infer_out)

        if self.config["seq_parallel"]:
            x = self._seq_parallel_post_process(x)

        noise_pred = self.post_infer.infer(x, pre_infer_out)[0]

        if self.clean_cuda_cache:
            del x, pre_infer_out
            torch.cuda.empty_cache()

        return noise_pred

    @torch.no_grad()
    def _seq_parallel_pre_process(self, pre_infer_out):
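        """Pad dim 0 of x (and the embeds for wan2.2 i2v) to a multiple of the seq-parallel world size, then keep only this rank's chunk."""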
        x = pre_infer_out.x
        world_size = dist.get_world_size(self.seq_p_group)
        cur_rank = dist.get_rank(self.seq_p_group)

        padding_size = (world_size - (x.shape[0] % world_size)) % world_size
        if padding_size > 0:
            x = F.pad(x, (0, 0, 0, padding_size))

        pre_infer_out.x = torch.chunk(x, world_size, dim=0)[cur_rank]

        if self.config["model_cls"] in ["wan2.2", "wan2.2_audio"] and self.config["task"] == "i2v":
            embed, embed0 = pre_infer_out.embed, pre_infer_out.embed0

            padding_size = (world_size - (embed.shape[0] % world_size)) % world_size
            if padding_size > 0:
                embed = F.pad(embed, (0, 0, 0, padding_size))
                embed0 = F.pad(embed0, (0, 0, 0, 0, 0, padding_size))

            pre_infer_out.embed = torch.chunk(embed, world_size, dim=0)[cur_rank]
            pre_infer_out.embed0 = torch.chunk(embed0, world_size, dim=0)[cur_rank]

        return pre_infer_out

    @torch.no_grad()
    def _seq_parallel_post_process(self, x):
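        """All-gather the per-rank chunks over the seq-parallel group and concatenate them along dim 0."""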
        world_size = dist.get_world_size(self.seq_p_group)
        gathered_x = [torch.empty_like(x) for _ in range(world_size)]
        dist.all_gather(gathered_x, x, group=self.seq_p_group)
        combined_output = torch.cat(gathered_x, dim=0)
        return combined_output