from typing import Optional

import pytest
import torch
import torch.nn as nn

import colossalai
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.d_tensor.layout import Layout
from colossalai.tensor.d_tensor.sharding_spec import ShardingSpec
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils.common import print_rank_0

try:
    from colossalai.utils.model.experimental import LazyInitContext, LazyTensor, _MyTensor
except ImportError:
    # the experimental lazy-init module may be unavailable on older versions; the test is skipped via SUPPORT_LAZY below
    pass
from utils import SUPPORT_LAZY, assert_dist_model_equal, set_seed

from tests.kit.model_zoo import model_zoo


def find_shard_dim(shape: torch.Size) -> Optional[int]:
    # return the first dimension whose size is divisible by 2 (and thus shardable across 2 devices), or None
    for dim, size in enumerate(shape):
        if size % 2 == 0:
            return dim
    return None
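
# Illustrative (hypothetical) examples of the helper above:
#   find_shard_dim(torch.Size([3, 8])) -> 1    (dim 1 is the first even-sized dimension)
#   find_shard_dim(torch.Size([3, 5])) -> None (no dimension is divisible by 2)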


def make_layout(device_mesh: DeviceMesh, original_tensor: torch.Tensor) -> Layout:
    shard_dim = find_shard_dim(original_tensor.shape)
    dim_partition_dict = {shard_dim: [0]} if shard_dim is not None else {}
    target_sharding_spec = ShardingSpec(dim_size=original_tensor.dim(), dim_partition_dict=dim_partition_dict)
    layout = Layout(device_mesh=device_mesh,
                    device_type=torch.device('cuda'),
                    sharding_spec=target_sharding_spec,
                    entire_shape=original_tensor.shape)
    return layout
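
# Sketch of what the layout above encodes (illustrative numbers, not taken from the test):
# for a parameter of shape (8, 16) on the 2x2 mesh built in run_dist_lazy_init, find_shard_dim
# picks dim 0, so dim_partition_dict = {0: [0]} shards dim 0 across mesh axis 0 and each rank
# holds a local (4, 16) slice; a tensor with no even-sized dimension is replicated instead.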


def _get_current_name(prefix: str, name: str) -> str:
    return f'{prefix}.{name}'.lstrip('.')


def generate_layout_dict(model: nn.Module, device_mesh: DeviceMesh) -> dict:
    layout_dict = {}

    @torch.no_grad()
    def generate_recursively(module: nn.Module, prefix: str = ''):
        # recurse into child modules first
        for name, mod in module.named_children():
            generate_recursively(mod, prefix=_get_current_name(prefix, name))

        # generate layouts for parameters and buffers directly attached to the current module
        for name, param in module.named_parameters(recurse=False):
            if isinstance(param, LazyTensor):
                layout = make_layout(device_mesh, param)
                layout_dict[_get_current_name(prefix, name)] = layout

        for name, buf in module.named_buffers(recurse=False):
            if isinstance(buf, LazyTensor):
                layout = make_layout(device_mesh, buf)
                layout_dict[_get_current_name(prefix, name)] = layout

    generate_recursively(model)

    return layout_dict
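
# The returned dict maps fully-qualified parameter/buffer names to target layouts, e.g. for a
# hypothetical model holding a single nn.Linear(16, 8) named 'fc', both 'fc.weight' (shape
# (8, 16)) and 'fc.bias' (shape (8,)) get layouts that shard dim 0. Only LazyTensor leaves
# (i.e. tensors created under LazyInitContext) receive entries.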


@parameterize('subset', ['torchvision', 'diffusers', 'timm', 'transformers', 'torchaudio', 'deepfm', 'dlrm'])
def run_dist_lazy_init(subset, seed: int = 42):
    sub_model_zoo = model_zoo.get_sub_registry(subset)
    # a 2x2 logical device mesh over the 4 spawned ranks
    device_mesh = DeviceMesh(torch.Tensor([0, 1, 2, 3]), (2, 2), init_process_group=True)
    # seed every lazy op so that both constructions below produce identical parameter values
    _MyTensor._pre_op_fn = lambda *args: set_seed(seed)
    LazyTensor._pre_op_fn = lambda *args: set_seed(seed)

    for name, entry in sub_model_zoo.items():
        # TODO(ver217): lazy init does not support weight norm, skip these models
        if name in ('torchaudio_wav2vec2_base', 'torchaudio_hubert_base'):
            continue
        print_rank_0(name)
        model_fn, data_gen_fn, output_transform_fn, model_attr = entry
        # reference model built with _MyTensor, used as the ground truth for comparison
        ctx = LazyInitContext(tensor_cls=_MyTensor)
        with ctx:
            model = model_fn()
        # deferred model built lazily; it is materialized and sharded by ctx.distribute() below
        ctx = LazyInitContext()
        with ctx:
            deferred_model = model_fn()
        layout_dict = generate_layout_dict(deferred_model, device_mesh)
        ctx.distribute(deferred_model, layout_dict, verbose=True)
        assert_dist_model_equal(model, deferred_model, layout_dict)


def run_dist(rank, world_size, port) -> None:
    colossalai.launch({}, rank=rank, world_size=world_size, host='localhost', port=port)
    run_dist_lazy_init()


@pytest.mark.skipif(not SUPPORT_LAZY, reason='torch version should be >= 1.12.0')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_lazy_init():
    spawn(run_dist, 4)


if __name__ == '__main__':
    test_dist_lazy_init()