from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.tensor import (
    ColoParameter,
    ColoTensorSpec,
    ComputePattern,
    ComputeSpec,
    ProcessGroup,
    ReplicaSpec,
    ShardSpec,
)
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_tensor.common_utils import set_seed


def run_colo_init_context(rank: int, world_size: int, port: int):
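    # launch a single-node NCCL process group for this rank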
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    # use the same seed on every process so the parameters are initialized
    # identically and therefore exactly replicated across ranks
    set_seed(42)
    get_components_func = non_distributed_component_funcs.get_callable('gpt2')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

    # keep parameters replicated during init
    with ColoInitContext(device=get_current_device()):
        model1 = model_builder()
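    # model1's parameters are ColoParameters materialized on the current CUDA
    # device and, thanks to the shared seed, identical on every rank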

    # apply a default shard plan during init; ReplicaSpec keeps the parameters
    # replicated, while the commented-out ShardSpec would instead shard them
    # along dim 0 across the tensor-parallel group
    set_seed(42)
    shard_spec = ReplicaSpec()
    # shard_spec = ShardSpec(dims=[0], num_partitions=[world_size])
    default_shard_plan = {'pg': ProcessGroup(tp_degree=world_size), 'shard_spec': shard_spec}
    with ColoInitContext(device=get_current_device(), default_shard_plan=default_shard_plan):
        model2 = model_builder()
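    # model2's parameters additionally carry the plan's process group and
    # distribution spec from the moment they are created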

    # reshard both models to the same last-dim shard so their local
    # parameter shards can be compared elementwise
    new_shard = ShardSpec(dims=[-1], num_partitions=[world_size])
    for p1, p2 in zip(model1.parameters(), model2.parameters()):
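        # annotation only: parameters created under ColoInitContext are ColoParameters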
        p1: ColoParameter = p1
        p1.set_process_group(ProcessGroup(tp_degree=world_size))
        p1.set_dist_spec(new_shard)
        p2.set_dist_spec(new_shard)

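    # with identical seeds and now identical dist specs, both models must hold
    # numerically identical local shards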
    for p1, p2 in zip(model1.parameters(), model2.parameters()):
        assert torch.allclose(p1, p2)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_colo_init_context(world_size):
    run_func = partial(run_colo_init_context, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
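
# A minimal sketch (kept as comments, not executed) of the same reshard
# primitive applied to a single parameter; the 4 x 8 shape is hypothetical and
# ColoParameter's spec= keyword is assumed from this codebase's API:
#
#   pg = ProcessGroup(tp_degree=world_size)
#   param = ColoParameter(torch.randn(4, 8), spec=ColoTensorSpec(pg, ReplicaSpec()))
#   param.set_dist_spec(ShardSpec(dims=[-1], num_partitions=[world_size]))
#   # each rank now stores a (4, 8 // world_size) slice of the full tensor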


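# allow a quick manual run outside pytest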
if __name__ == '__main__':
    test_colo_init_context(2)