#!/usr/bin/env python
# -*- encoding: utf-8 -*-
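"""Tests for ColossalAI ZeRO sharding primitives: ShardedTensor shard/gather
under both shard strategies, ShardedParamV2 payload handling, and
ShardedParam construction from a parameter or a raw shape."""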

from copy import deepcopy
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_param import ShardedParam, ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from colossalai.testing import parameterize
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_zero_data_parallel.common import CONFIG, allclose


@parameterize("shard_strategy", [TensorShardStrategy, BucketTensorShardStrategy])
def run_shard_tensor_with_strategy(shard_strategy, world_size):
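    """Shard a ShardedTensor across ranks, then gather it back, checking shapes."""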
    t = ShardedTensor(tensor=torch.randn(world_size * 2, 3))
    assert list(t.origin_shape) == [world_size * 2, 3]
    assert list(t.shape) == [world_size * 2, 3]

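    # process_group=None lets the strategy fall back to the default process group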
    shard_strategy = shard_strategy(process_group=None)

    # After sharding, each rank holds a flattened 1-D slice of the tensor:
    # world_size * 2 * 3 elements split across world_size ranks -> 6 per rank.
    shard_strategy.shard([t])
    assert list(t.shape) == [6], f"{list(t.shape)} vs [6]"
    shard_strategy.gather([t])
    assert list(t.shape) == [world_size * 2, 3], f"{list(t.shape)} vs {[world_size * 2, 3]}"


def _run_shard_tensor(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_shard_tensor_with_strategy(world_size=world_size)


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_shard_tensor(world_size):
    run_func = partial(_run_shard_tensor, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def _run_shard_param_v2(rank, world_size, port):
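    """ShardedParamV2 should preserve the payload and be able to free param.data."""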
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param, process_group=None)

    allclose(sparam.data.payload, param_ref.data)

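    # remove_torch_payload reduces param.data to a one-element placeholder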
    sparam.remove_torch_payload()
    assert (param.data.numel() == 1)


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_shard_param_v2(world_size):
    run_func = partial(_run_shard_param_v2, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def _run_test_shard_param(rank, world_size, port):
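    """Shard and gather the parameters of the registered test models via col_attr."""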
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param, process_group=None)
    allclose(sparam.data.payload, param_ref.data)

    logger = get_dist_logger()
    for get_components_func in non_distributed_component_funcs:
        model_builder, *_ = get_components_func()
        model = model_builder(checkpoint=True)
        # attach a col_attr attribute to each parameter to hijack accesses to param.data
        for _, param in model.named_parameters():
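            # after sharding, each rank holds ceil(numel / world_size) elements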
            numel_ref = (param.numel() + world_size - 1) // world_size
            param.col_attr = ShardedParam(param)
            param.col_attr.shard()
            param_data = param.col_attr.payload(torch.device('cpu'))
            assert (numel_ref == param_data.numel())

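        # gathering should restore the full payload on every rank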
        for _, param in model.named_parameters():
            param.col_attr.gather()
            param_data = param.col_attr.payload(torch.device('cpu'))

        disable_existing_loggers([logger])


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_shard_param(world_size):
    run_func = partial(_run_test_shard_param, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def _run_init_shard_param(rank, world_size, port):
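    """ShardedParam can be built from an existing Parameter or from a raw shape."""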
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    param = torch.nn.Parameter(data=torch.rand(world_size, 3))
    sparam = ShardedParam(param, process_group=None, is_sharded=True)
    payload = sparam.payload(torch.device('cuda'))
    assert (list(payload.shape) == [3])
    del sparam

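    # build from a raw shape instead; is_sharded=True stores only this rank's slice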
    param_shape = (world_size, 3)
    sparam = ShardedParam(param_shape, process_group=None, is_sharded=True, device=torch.device('cpu'))
    payload = sparam.payload(torch.device('cuda'))
    assert (list(payload.shape) == [3])

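    # with is_sharded=False the full tensor is kept on every rank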
    param_shape = (world_size, 3)
    sparam = ShardedParam(param_shape, process_group=None, is_sharded=False, device=torch.device('cpu'))
    payload = sparam.payload(torch.device('cuda'))
    assert (list(payload.shape) == [world_size, 3])


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
def test_init_shard_param(world_size):
    run_func = partial(_run_init_shard_param, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_shard_tensor(2)
    test_shard_param(2)
    test_shard_param_v2(2)
    test_init_shard_param(4)