common.py
from functools import partial

import torch
import torch.distributed as dist
import torch.nn as nn

from colossalai.logging import get_dist_logger
from colossalai.utils import checkpoint
from colossalai.zero.sharded_model import ShardedModelV2

LOGGER = get_dist_logger()

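# Test configuration: ZeRO level-3 with optimizer states and parameters offloaded
# to pinned CPU memory; FP16 mode and pipeline/tensor parallelism stay disabled.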
CONFIG = dict(fp16=dict(mode=None,),
              zero=dict(level=3,
                        verbose=False,
                        offload_optimizer_config=dict(device='cpu', pin_memory=True, buffer_count=5, fast_init=False),
                        offload_param_config=dict(device='cpu',
                                                  pin_memory=True,
                                                  buffer_count=5,
                                                  buffer_size=1e8,
                                                  max_in_cpu=1e9)),
              parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)))


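# Run one forward/backward pass. ShardedModelV2 drives its own backward pass,
# so it is called via model.backward(loss) instead of loss.backward().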
def run_fwd_bwd(model, data, label, criterion, enable_autocast=False):
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            loss = model(data, label)
        loss = loss.float()
    if isinstance(model, ShardedModelV2):
        model.backward(loss)
    else:
        loss.backward()


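# Enable activation checkpointing by routing the module's forward through
# colossalai.utils.checkpoint.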
def checkpoint_wrapper(module, enable=True):
    if enable:
        module.forward = partial(checkpoint, module.forward)
    return module


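# Small MLP used as the test model; fc1 and fc2 are each applied twice per
# forward pass, and fc1 can optionally be activation-checkpointed.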
class Net(nn.Module):

    def __init__(self, checkpoint=False) -> None:
        super().__init__()
        self.fc1 = nn.Linear(5, 5)
        self.fc2 = nn.Linear(5, 5)
        self.fc3 = nn.Linear(5, 1)
        if checkpoint:
            self.fc1 = checkpoint_wrapper(self.fc1)
        self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x


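# Element-wise tensor comparison; `loose` relaxes the tolerances to
# atol=1e-2 / rtol=1e-3.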
def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
    if loose:
        return torch.allclose(tensor_a, tensor_b, atol=1e-2, rtol=1e-3)
    return torch.allclose(tensor_a, tensor_b)


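# Assert that the reference model and the ZeRO model have matching gradients,
# parameter by parameter.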
def check_grads(model, zero_model, loose=False):
    for p, zero_p in zip(model.parameters(), zero_model.parameters()):
        zero_grad = zero_p.grad.clone().to(p.device)
        grad = p.grad.float()
        assert grad.dtype == zero_grad.dtype
        assert allclose(grad, zero_grad, loose=loose)


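# Assert that the reference model and the ZeRO model have matching parameters.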
def check_params(model, zero_model, loose=False):
    for p, zero_p in zip(model.parameters(), zero_model.parameters()):
        zero_p = zero_p.clone().to(p.device)
        assert p.dtype == zero_p.dtype
        assert allclose(p, zero_p, loose=loose)


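# Compare this rank's shard of each gradient: the reference gradient is flattened
# and chunked across ranks, and the ZeRO gradient is trimmed of trailing padding
# before comparison.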
def check_grads_padding(model, zero_model, loose=False):
    rank = dist.get_rank()
    for p, zero_p in zip(model.parameters(), zero_model.parameters()):
        zero_grad = zero_p.grad.clone().to(p.device)
        chunks = torch.flatten(p.grad).chunk(dist.get_world_size())
        if rank >= len(chunks):
            continue
        grad = chunks[rank].float()
        if zero_grad.size(0) > grad.size(0):
            zero_grad = zero_grad[:grad.size(0)]
        assert grad.dtype == zero_grad.dtype
        assert allclose(grad, zero_grad, loose=loose), f'diff: {grad - zero_grad}'


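# Same shard-wise comparison as check_grads_padding, applied to parameter values.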
def check_params_padding(model, zero_model, loose=False):
    rank = dist.get_rank()
    for p, zero_p in zip(model.parameters(), zero_model.parameters()):
        zero_p = zero_p.clone().to(p.device)
        chunks = torch.flatten(p).chunk(dist.get_world_size())
        if rank >= len(chunks):
            continue
        p = chunks[rank]
        if zero_p.size(0) > p.size(0):
            zero_p = zero_p[:p.size(0)]
        assert p.dtype == zero_p.dtype
        assert allclose(p, zero_p, loose=loose)


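# Compare this rank's parameter chunk against the sharded payload stored on the
# ZeRO parameter's col_attr attribute.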
def check_sharded_params_padding(model, zero_model, loose=False):
    rank = dist.get_rank()
    for p, zero_p in zip(model.parameters(), zero_model.parameters()):
        zero_p = zero_p.col_attr.data.payload.to(p.device).float()
        chunks = torch.flatten(p).chunk(dist.get_world_size())
        if rank >= len(chunks):
            continue
        p = chunks[rank].float()
        if zero_p.size(0) > p.size(0):
            zero_p = zero_p[:p.size(0)]
        assert p.dtype == zero_p.dtype
        assert allclose(p, zero_p, loose=loose), f'{p} vs {zero_p}'
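

# Minimal usage sketch (hypothetical, not part of this module): build a reference
# model and a ZeRO-wrapped copy (e.g. with ShardedModelV2), run one step on each,
# then compare gradients with the helpers above.
#
#     model = Net(checkpoint=True).cuda()
#     data, label = torch.rand(4, 5).cuda(), torch.rand(4, 1).cuda()
#     criterion = torch.nn.MSELoss()
#     run_fwd_bwd(model, data, label, criterion)
#     run_fwd_bwd(zero_model, data, label, criterion)   # zero_model: ShardedModelV2 copy
#     check_grads(model, zero_model, loose=True)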