from copy import deepcopy
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_on_exception
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_param import ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from tests.test_zero_data_parallel.common import CONFIG, allclose
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor


@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_shard_tensor_with_strategy(shard_strategy_class, world_size):
    t = ShardedTensor(tensor=torch.randn(world_size * 2, 3))
    assert list(t.origin_shape) == [world_size * 2, 3]
    assert list(t.shape) == [world_size * 2, 3]

    shard_strategy = shard_strategy_class()

    # test shard strategy
    shard_strategy.shard([t])
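    # each rank now holds a flattened shard of (world_size * 2 * 3) / world_size = 6 elements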
    assert list(t.shape) == [6], f"{list(t.shape)} vs 6"
    shard_strategy.gather([t])
    assert list(t.shape) == [world_size * 2, 3], f"{list(t.shape)} vs {[world_size * 2, 3]}"


def _run_shard_tensor(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_shard_tensor_with_strategy(world_size=world_size)


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_shard_tensor(world_size):
    run_func = partial(_run_shard_tensor, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def _run_shard_param_v2(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param)

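    # the payload wrapped by ShardedParamV2 should still match the original parameter data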
    allclose(sparam.sharded_data_tensor.payload, param_ref.data)

    # Test get memory usage
    sparam.saved_grad = StatefulTensor(torch.randn(2, 3))
    cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
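    # data tensor and saved_grad each hold 2 * 3 fp32 elements (24 bytes), all on CPU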
    assert cpu_mem_use == 2 * 3 * 4 * 2, f"cpu_mem_use: {cpu_mem_use}"

    sparam.remove_torch_payload()
    assert param.data.numel() == 0
    cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
    # the dummy tensor left in param.data is empty, so CPU usage is unchanged
    assert cpu_mem_use == 2 * 3 * 4 * 2

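    # replacing saved_grad and removing the payload again should leave memory usage unchanged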
    sparam.saved_grad = StatefulTensor(torch.randn(2, 3))
    sparam.remove_torch_payload()
    cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
    assert cpu_mem_use == 2 * 3 * 4 * 2
    assert cuda_mem_use == 0

    # attach a grad to the torch param
    param.data = sparam.sharded_data_tensor.payload
    param.grad = torch.randn(2, 3)
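    # the torch grad adds another 2 * 3 * 4 = 24 bytes of CPU memory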
    cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
    assert cpu_mem_use == 2 * 3 * 4 * 2 + 2 * 3 * 4, f"cpu_mem_use {cpu_mem_use}"
    assert cuda_mem_use == 0

    # reuse torch grad for sparam
    sparam.saved_grad = StatefulTensor(param.grad)
    cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
    assert cpu_mem_use == 2 * 3 * 4 * 2
    assert cuda_mem_use == 0


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_shard_param_v2(world_size):
    run_func = partial(_run_shard_param_v2, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_shard_tensor(2)
    test_shard_param_v2(2)