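"""Test parameter handling of an MoE model initialised under ZeroInitContext.

A small MoE model is built inside ZeroInitContext, and every parameter is then
checked for its dtype, whether it is sharded, whether it is replicated, and the
device its payload ends up on.
"""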
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize, rerun_on_exception
from colossalai.utils import free_port, get_current_device
from colossalai.context import MOE_CONTEXT
from colossalai.nn.layer import MoeModule
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy

from tests.test_zero_data_parallel.common import CONFIG


class MoeModel(nn.Module):

    def __init__(self):
        super().__init__()
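        # project the 4-dim input up to the 16-dim hidden size expected by the MoE layer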
        self.proj1 = nn.Linear(4, 16)
        expert_cls = nn.Linear
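        # each of the 8 experts is a plain nn.Linear(16, 16); use_residual=True adds a
        # residual expert branch (hence the 'residual_combine' parameters checked below)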
        expert_args_dict = dict(in_features=16, out_features=16)
        self.moe = MoeModule(dim_model=16,
                             num_experts=8,
                             use_residual=True,
                             expert_cls=expert_cls,
                             **expert_args_dict)
        self.proj2 = nn.Linear(16, 4)

    def forward(self, x):
        x = self.proj1(x)
        x = self.moe(x)
        x = self.proj2(x)
        return x


@parameterize("init_device_type", ['cpu', 'cuda'])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_moe_zero_init(init_device_type, shard_strategy_class):
    logger = get_dist_logger("test_moe_zero_init")

    if init_device_type == 'cuda':
        init_device = get_current_device()
    elif init_device_type == 'cpu':
        init_device = torch.device("cpu")
    else:
        raise NotImplementedError("Unknown device found.")

    model_numel_tensor = torch.zeros(1, dtype=torch.int)
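    # build the model inside ZeroInitContext so its parameters are sharded as they
    # are created; model_numel_tensor accumulates the total parameter count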
    with ZeroInitContext(target_device=init_device,
                         shard_strategy=shard_strategy_class(),
                         shard_param=True,
                         model_numel_tensor=model_numel_tensor):
        model = MoeModel()

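    # every parameter created under ZeroInitContext should carry a colo_attr
    # with its sharded data tensor and sharding metadata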
    for name, param in model.named_parameters():
        assert hasattr(param, 'colo_attr')

        # the parameters of the gate should be kept in fp32
        if 'gate' in name:
            assert param.colo_attr.sharded_data_tensor.dtype == torch.float32
        else:
            assert param.colo_attr.sharded_data_tensor.dtype == torch.half

        # the parameters in the moe experts, the gate and the residual combiner should not be sharded
        if ('experts' in name) or ('gate' in name) or ('residual_combine' in name):
            assert not param.colo_attr.sharded_data_tensor.is_sharded
        else:
            assert param.colo_attr.sharded_data_tensor.is_sharded

        # the parameters in the moe experts are not replicated
        if 'experts' in name:
            assert not param.colo_attr.is_replicated
        else:
            assert param.colo_attr.is_replicated

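        # sharded parameters should sit on the requested init device, while the
        # unsharded MoE parameters are expected to stay on CUDA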
        if param.colo_attr.param_is_sharded:
            assert param.colo_attr.sharded_data_tensor.payload.device.type == init_device.type, \
                f'{param.colo_attr.sharded_data_tensor.payload.device.type} vs. {init_device.type}'
        else:
            assert param.colo_attr.sharded_data_tensor.payload.device.type == 'cuda'


def _run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
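    # set up the MoE parallel context (and its random seed) before building the model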
    MOE_CONTEXT.setup(seed=42)
    run_moe_zero_init()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_moe_zero_init(world_size):
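    # spawn one process per rank; free_port() provides an unused port for the rendezvous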
    run_func = partial(_run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_zero_init(world_size=2)