"tests/vscode:/vscode.git/clone" did not exist on "bcc8655021c60aaed008e26d9a4ea63c164e6952"
test_init_context.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize, rerun_on_exception
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory_tracer.model_data_memtracer import col_model_data_mem_usage
from colossalai.utils.memory_utils.memory_monitor import colo_cuda_memory_used
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy
from tests.components_to_test.registry import non_distributed_component_funcs

from common import CONFIG

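# Verifies that a model built under ZeroInitContext comes out with fp16,
# sharded parameters resident on the requested device, for both shard
# strategies and both 'cpu' and 'cuda' initial devices.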
@parameterize("init_device_type", ['cpu', 'cuda'])
24
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
25
def run_model_test(init_device_type, shard_strategy_class):
26
27
    logger = get_dist_logger("test_zero_init")

    for get_components_func in non_distributed_component_funcs:
        model_builder, _, _, _, _ = get_components_func()
        # counts the parameters of the model built under the ZeRO context
        model_numel_tensor = torch.zeros(1, dtype=torch.int)
        if init_device_type == 'cuda':
            init_device = torch.device(f"cuda:{get_current_device()}")
        elif init_device_type == 'cpu':
            init_device = torch.device("cpu")
        else:
            continue

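        # Build the model inside ZeroInitContext: with convert_fp16=True and
        # shard_param=True, each parameter is cast to fp16 and sharded by the
        # chosen strategy as it is constructed, landing on target_device.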
        with ZeroInitContext(convert_fp16=True,
                             target_device=init_device,
                             shard_strategy=shard_strategy_class(),
                             shard_param=True,
                             model_numel_tensor=model_numel_tensor,
                             rm_torch_payload_on_the_fly=False):
            model = model_builder(checkpoint=True)

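        # Leaving the context leaves ZeRO metadata on every parameter in
        # `col_attr`, describing its sharded fp16 payload.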
        for param in model.parameters():
            assert hasattr(param, 'col_attr')
            assert param.col_attr.sharded_data_tensor.dtype == torch.half
            assert param.col_attr.sharded_data_tensor.is_sharded
            assert param.col_attr.sharded_data_tensor.payload.device.type == init_device.type, \
                f'{param.col_attr.sharded_data_tensor.payload.device.type} vs. {init_device.type}'

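        # Log model-data CUDA memory, overall CUDA memory in use, and the
        # parameter count gathered by the context (rank 0 only).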
        cuda_mem_use, cpu_mem_use = col_model_data_mem_usage(model)
        model_data_cuda_mem_MB = cuda_mem_use / 1e6
        logger.info(f"Existing ZeRO Context.\nModel Data CUDA Memory {model_data_cuda_mem_MB} MB", ranks=[0])
        sys_cuda_mem_MB = colo_cuda_memory_used() / 1e6
        logger.info(f"System CUDA Memory Usage {sys_cuda_mem_MB} MB", ranks=[0])
        logger.info(f"Model Number Parameter {model_numel_tensor.numpy()[0]/1e6} M", ranks=[0])


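# Per-process entry point: initialise the ColossalAI distributed runtime on
# this rank, then run the parameterised model test.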
def run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_model_test()


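# Spawns one worker per rank; reruns when a worker dies with
# "Address already in use" from a stale port.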
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_zero_init_context(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_zero_init_context(4)