#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from copy import deepcopy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.utils import free_port
from colossalai.zero.shard_utils import TensorShardStrategy
from colossalai.zero.sharded_param import ShardedParam, ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2

from tests.test_zero_data_parallel.common import CONFIG, Net, allclose


def run_shard_tensor(rank, world_size, port):
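    # shard a 2-D tensor with TensorShardStrategy, then gather it back and
    # check that the shape round-trips to the original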
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    t = ShardedTensor(tensor=torch.randn(world_size * 2, 3))
    assert list(t.origin_shape) == [world_size * 2, 3]
    assert list(t.shape) == [world_size * 2, 3]

    shard_strategy = TensorShardStrategy(process_group=None)

    # test shard strategy
    shard_strategy.shard([t])
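    # each rank keeps numel // world_size = (world_size * 2 * 3) // world_size = 6 elements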
    assert list(t.shape) == [6], f"{list(t.shape)} vs 6"
    shard_strategy.gather([t])
    assert list(t.shape) == [world_size * 2, 3], f"{list(t.shape)} vs {[world_size * 2, 3]}"


@pytest.mark.dist
def test_shard_tensor():
    world_size = 2
    run_func = partial(run_shard_tensor, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def _run_shard_param_v2(rank, world_size, port):
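    # ShardedParamV2 wraps an existing parameter; its payload should match the original data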
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param, process_group=None)

    allclose(sparam.data, param_ref.data)

    sparam.remove_torch_payload()
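    # param.data should now hold only a 1-element placeholder tensor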
    assert param.data.numel() == 1


@pytest.mark.dist
def test_shard_param_v2():
    world_size = 2
    run_func = partial(_run_shard_param_v2, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def _run_test_shard_param(rank, world_size, port):
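    # attach a ShardedParam to every parameter of a small model, shard them,
    # and check that each rank keeps ceil(numel / world_size) elements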
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param, process_group=None)
    allclose(sparam.data, param_ref.data)

    logger = get_dist_logger()
    model = Net()

    # add a ca_attr attribute to each parameter to hijack access to param.data
    for _, param in model.named_parameters():
        numel_ref = (param.numel() + world_size - 1) // world_size
        param.ca_attr = ShardedParam(param)
        param.ca_attr.shard()
        param_data = param.ca_attr.payload(torch.device('cpu'))
        assert numel_ref == param_data.numel()

    for _, param in model.named_parameters():
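        # gathering restores the full payload on every rank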
        param.ca_attr.gather()
        param_data = param.ca_attr.payload(torch.device('cpu'))

    disable_existing_loggers([logger])


@pytest.mark.dist
def test_shard_param():
    world_size = 2
    run_func = partial(_run_test_shard_param, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def run_init_shard_param(rank, world_size, port):
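    # construct ShardedParam three ways and check the payload shape each time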
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
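    # case 1: wrap an existing Parameter, stored sharded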
    param = torch.nn.Parameter(data=torch.rand(2, 3))
    sparam = ShardedParam(param, process_group=None, is_sharded=True)
    payload = sparam.payload(torch.device('cuda'))
    assert list(payload.shape) == [3]
    del sparam

    param_shape = (2, 3)
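    # case 2: build from a shape tuple, stored sharded on CPU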
    sparam = ShardedParam(param_shape, process_group=None, is_sharded=True, device=torch.device('cpu'))
    payload = sparam.payload(torch.device('cuda'))
    assert list(payload.shape) == [3]

    param_shape = (2, 3)
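    # case 3: build from a shape tuple, stored unsharded on CPU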
    sparam = ShardedParam(param_shape, process_group=None, is_sharded=False, device=torch.device('cpu'))
    payload = sparam.payload(torch.device('cuda'))
    assert list(payload.shape) == [2, 3]


@pytest.mark.dist
def test_init_shard_param():
    world_size = 2
    run_func = partial(run_init_shard_param, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_shard_tensor()
    test_shard_param()
    test_shard_param_v2()
    test_init_shard_param()