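"""Checks that parameters registered via ``set_params_to_ignore`` are excluded from
gradient synchronization in both ``ColoDDP`` and ``ZeroDDP``."""
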
import os
import random
from functools import partial
from typing import Callable, Type

import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

import colossalai
from colossalai.gemini.chunk import ChunkManager, search_chunk_configuration
from colossalai.gemini.gemini_mgr import GeminiManager
from colossalai.nn.parallel import ColoDDP, ZeroDDP
from colossalai.tensor import ProcessGroup
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext


def set_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def init_ddp(module: torch.nn.Module) -> ColoDDP:
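    # Wrap the model with ColoDDP on the default (global) process group.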
    pg = ProcessGroup()
    return ColoDDP(module, process_group=pg)


def init_ddpv2(module: torch.nn.Module) -> ZeroDDP:
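    # Build the Gemini/ZeroDDP wrapper: search a chunk configuration for the model,
    # then create the chunk manager and Gemini manager that handle the chunked parameters.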
    chunk_config, _ = search_chunk_configuration(module, 4, 1024)
    chunk_manager = ChunkManager(chunk_config)
    gemini_manager = GeminiManager('cuda', chunk_manager)
    return ZeroDDP(module, gemini_manager)


class Net(torch.nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = torch.nn.Linear(3, 3, bias=False)
        self.fc2 = torch.nn.Linear(3, 1, bias=False)

    def forward(self, x):
        return self.fc2(self.fc1(x))


def run_fwd_bwd(ddp_cls: Type[ColoDDP], init_ddp_func: Callable[[torch.nn.Module], ColoDDP]):
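    """Run one forward/backward pass with ``model.fc2.weight`` marked as ignored.

    After backward, the gradient of ``fc1.weight`` must be identical on every rank
    (it is synchronized), while the gradient of ``fc2.weight`` must differ across
    ranks (it is ignored and each rank sees different input data).
    """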
    with ColoInitContext(device=get_current_device()):
        model = Net().cuda()
    w1 = model.fc1.weight
    w2 = model.fc2.weight
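    # Mark fc2.weight as ignored before wrapping: its gradient should not be reduced across ranks.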
    ddp_cls.set_params_to_ignore([w2])
    model = init_ddp_func(model)
    x = torch.rand(2, 3, device=get_current_device())
    logits = model(x)
    loss = torch.sum(logits)
    model.backward(loss)

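    # For ZeroDDP the reduced gradient is stored in the parameter tensor itself after
    # backward, so the parameter is gathered directly; ColoDDP keeps it in ``.grad``.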
    if ddp_cls is ZeroDDP:
        w1s_grad = w1
    else:
        w1s_grad = w1.grad

    w1_grads = [torch.empty_like(w1) for _ in range(dist.get_world_size())]
    dist.all_gather(w1_grads, w1s_grad)
    assert torch.equal(w1_grads[0], w1_grads[1])
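    # The ignored parameter is not synchronized, so its gradient should differ between ranks.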
    w2_grads = [torch.empty_like(w2) for _ in range(dist.get_world_size())]
    dist.all_gather(w2_grads, w2.grad)
    assert not torch.equal(w2_grads[0], w2_grads[1])


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
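    # Seed each rank differently so every rank draws different inputs; this makes the
    # unsynchronized gradient of the ignored parameter observably different across ranks.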
    set_seed(dist.get_rank())
    run_fwd_bwd(ColoDDP, init_ddp)
    run_fwd_bwd(ZeroDDP, init_ddpv2)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [2])
@rerun_if_address_is_in_use()
def test_ddp_ignore_params(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_ddp_ignore_params(2)