distributed.py
r"""
distributed support for Megatron
"""
from fmoe.distributed import DistributedGroupedDataParallel


_moe_group = None


def set_moe_group(moe_group):
    global _moe_group
    _moe_group = moe_group


def get_moe_group():
    return _moe_group
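

# Usage sketch (hypothetical: the group construction below is an assumption
# and not part of this file). Megatron's initialization code is expected to
# register the expert-parallel communicator once, before any model is
# wrapped:
#
#     import torch.distributed as dist
#
#     moe_group = dist.new_group(ranks=list(range(dist.get_world_size())))
#     set_moe_group(moe_group)
#
# Later code, such as the wrapper below, then picks the group up through
# get_moe_group() or the module-level _moe_group.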


class DistributedDataParallel(DistributedGroupedDataParallel):
    r"""
    A wrapper that is used to replace the DDP module provided by Megatron, which
    is adapted to enable the sophiscated parallel and reduction strategies in
    Fast MoE.
    """

    def __init__(self, module):
        # Deferred import: Megatron is only required once the wrapper is
        # actually constructed, not when this module is imported.
        from megatron import mpu

        super().__init__(
            module,
            mp_group=mpu.get_model_parallel_group(),
            dp_group=mpu.get_data_parallel_group(),
            # The expert-parallel communicator registered via set_moe_group
            # (None if it was never set).
            moe_group=_moe_group,
        )

    def state_dict(self, *args, **kwargs):
        r"""
        Keep consitency with Megatron
        """
        return self.module.state_dict(*args, **kwargs)

    def state_dict_for_save_checkpoint(self, *args, **kwargs):
        r"""
        Keep consitency with Megatron
        """
        return self.module.state_dict_for_save_checkpoint(*args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        r"""
        Keep consitency with Megatron
        """
        return self.module.load_state_dict(*args, **kwargs)
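

# Minimal end-to-end sketch (assumptions: initialize_megatron and build_model
# are hypothetical stand-ins for Megatron's own setup code, and the choice of
# the data-parallel group as the MoE group is purely illustrative):
#
#     from megatron import mpu
#
#     initialize_megatron()
#     set_moe_group(mpu.get_data_parallel_group())
#
#     model = DistributedDataParallel(build_model())
#
#     # All three state-dict methods delegate to the inner module, so
#     # checkpoints keep the format Megatron reads and writes for
#     # unwrapped models.
#     state = model.state_dict_for_save_checkpoint()
#     model.load_state_dict(model.state_dict())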