import os
import random
from typing import Callable, Type

import numpy as np
import pytest
import torch
import torch.distributed as dist

import colossalai
from colossalai.nn.parallel import ColoDDP
from colossalai.tensor import ProcessGroup
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.utils.cuda import get_current_device
from colossalai.zero import ColoInitContext, ZeroDDP
from colossalai.zero.gemini.chunk import ChunkManager, search_chunk_configuration
from colossalai.zero.gemini.gemini_mgr import GeminiManager


def set_seed(seed):
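    """Make every RNG source (Python, NumPy, PyTorch CPU/CUDA) deterministic for ``seed``."""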
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def init_ddp(module: torch.nn.Module) -> ColoDDP:
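    """Wrap ``module`` with ColoDDP on the default process group spanning all ranks."""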
    pg = ProcessGroup()
    return ColoDDP(module, process_group=pg)


def init_ddpv2(module: torch.nn.Module) -> ZeroDDP:
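    """Wrap ``module`` with ZeroDDP (Gemini): search a chunk layout for the model,
    then let GeminiManager place the resulting chunks on CUDA."""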
    chunk_config, *_ = search_chunk_configuration(module, 4, 1024)
    chunk_manager = ChunkManager(chunk_config)
    gemini_manager = GeminiManager('cuda', chunk_manager)
    return ZeroDDP(module, gemini_manager)


class Net(torch.nn.Module):
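    """Toy two-layer MLP; ``fc2.weight`` is the parameter the test tells DDP to ignore."""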

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = torch.nn.Linear(3, 3, bias=False)
        self.fc2 = torch.nn.Linear(3, 1, bias=False)

    def forward(self, x):
        return self.fc2(self.fc1(x))


def run_fwd_bwd(ddp_cls: Type[ColoDDP], init_ddp_func: Callable[[torch.nn.Module], ColoDDP]):
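    """Run one forward/backward pass and verify gradient synchronization: the
    managed ``fc1.weight`` gradient must match across ranks, while the ignored
    ``fc2.weight`` keeps its rank-local (and therefore differing) gradient.
    """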
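    # Build the model under ColoInitContext so its parameters are created as ColoTensors.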
    with ColoInitContext(device=get_current_device()):
        model = Net().cuda()
    w1 = model.fc1.weight
    w2 = model.fc2.weight
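    # Mark fc2.weight as ignored before wrapping, so the wrapper skips it during gradient reduction.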
    ddp_cls.set_params_to_ignore([w2])
    model = init_ddp_func(model)
    x = torch.rand(2, 3, device=get_current_device())
    logits = model(x)
    loss = torch.sum(logits)
    model.backward(loss)

    if ddp_cls is ZeroDDP:
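        # After backward, ZeroDDP (Gemini) holds the gradient in the parameter
        # tensor's own chunk storage, so the tensor to gather is w1 itself.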
        w1s_grad = w1
    else:
        w1s_grad = w1.grad

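    # Gather fc1's gradient from all ranks: reduction should have made them identical.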
    w1_grads = [torch.empty_like(w1) for _ in range(dist.get_world_size())]
    dist.all_gather(w1_grads, w1s_grad)
    assert torch.equal(w1_grads[0], w1_grads[1])
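    # fc2 was ignored, so each rank keeps the gradient of its own (differently seeded) batch.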
    w2_grads = [torch.empty_like(w2) for _ in range(dist.get_world_size())]
    dist.all_gather(w2_grads, w2.grad)
    assert not torch.equal(w2_grads[0], w2_grads[1])


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
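    # Seed each rank with its own rank id so local inputs, and hence unsynced grads, differ.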
    set_seed(dist.get_rank())
    run_fwd_bwd(ColoDDP, init_ddp)
    run_fwd_bwd(ZeroDDP, init_ddpv2)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [2])
@rerun_if_address_is_in_use()
def test_ddp_ignore_params(world_size):
    spawn(run_dist, world_size)


if __name__ == '__main__':
    test_ddp_ignore_params(2)