import os
import random
import numpy as np
import torch
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode


def set_seed(seed):
    """Seed every RNG in use (Python, NumPy, PyTorch CPU/CUDA) for reproducible tests."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernels so repeated runs give identical results.
    torch.backends.cudnn.deterministic = True
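

# A minimal usage sketch (hypothetical test helper, not part of the original
# file): seeding every rank with the same value makes each process materialize
# an identical "master" tensor before it is broadcast or sharded.
def _example_seeded_master_tensor():
    set_seed(42)
    # Identical on every rank, because all RNGs above were seeded the same way.
    return torch.rand(16, 16)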


def check_equal(A, B):
    # Loose tolerances: parallel kernels reduce in a different order than the
    # single-device reference, so results only match approximately.
    assert torch.allclose(A, B, rtol=1e-3, atol=1e-1)
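

# A hedged illustration (hypothetical values): the loose tolerances accept the
# small numeric drift expected from parallel execution but still reject
# genuinely different tensors. `tensor_equal` is defined later in this module,
# which is fine because it is only looked up when this example runs.
def _example_tolerance_behavior():
    a = torch.ones(4)
    check_equal(a, a + 1e-4)              # passes: drift is within rtol/atol
    assert not tensor_equal(a, a + 1.0)   # a real mismatch is still caught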


def replace_parameter_add_grad(layer, weight=None, bias=None):
    """Replace a layer's weight/bias with the given tensors and enable their grads.

    The existing attribute is deleted first so that a registered nn.Parameter
    can be swapped for an arbitrary tensor without a type conflict.
    """
    if weight is not None:
        delattr(layer, 'weight')
        setattr(layer, 'weight', weight)
        layer.weight.requires_grad = True
    if bias is not None:
        delattr(layer, 'bias')
        setattr(layer, 'bias', bias)
        layer.bias.requires_grad = True
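

# Usage sketch (hypothetical layer and tensor names): install a pre-computed
# shard as a layer's weight so that autograd tracks it during the test.
def _example_install_shard(layer, weight_shard):
    replace_parameter_add_grad(layer, weight=torch.nn.Parameter(weight_shard))
    assert layer.weight.requires_grad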


def broadcast_tensor_chunk(tensor, chunk_size=1, local_rank=0):
    """Broadcast `tensor` from rank 0, then return this rank's chunk of it.

    The tensor is split into `chunk_size` pieces along the last dimension and
    the piece at index `local_rank` is returned as a contiguous copy.
    """
    dist.broadcast(tensor, src=0)
    tensor_chunk = torch.chunk(tensor, chunk_size, dim=-1)[local_rank]
    return tensor_chunk.clone()
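

# Usage sketch (assumes torch.distributed is already initialized and a GPU is
# available): every rank receives rank 0's master weight, then keeps only its
# own column slice, the usual setup step for 1-D tensor-parallel tests.
def _example_broadcast_and_shard(world_size, local_rank):
    master_weight = torch.rand(8, 8).cuda()
    # 8 // world_size columns per rank, cloned so the full tensor can be freed.
    return broadcast_tensor_chunk(master_weight, chunk_size=world_size, local_rank=local_rank)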


def tensor_equal(A, B):
    # Same loose tolerances as check_equal, but returns a bool instead of asserting.
    return torch.allclose(A, B, rtol=1e-3, atol=1e-1)


def tensor_shard_equal(tensor: torch.Tensor, shard: torch.Tensor, rank, world_size):
    """Check that `shard` matches the corresponding slice of the full `tensor`.

    If the shapes already match, compare directly. Otherwise the shapes may
    differ in exactly one dimension (a 1-D shard): chunk the full tensor along
    that dimension and compare this rank's chunk against `shard`.
    """
    assert tensor.ndim == shard.ndim
    if tensor.shape == shard.shape:
        return tensor_equal(tensor, shard)
    else:
        dims_not_eq = torch.nonzero(torch.tensor(tensor.shape) != torch.tensor(shard.shape))
        if dims_not_eq.numel() == 1:
            # 1D shard: exactly one dimension was split across ranks.
            dim = dims_not_eq.item()
            if world_size is None:
                world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
            if rank is None:
                rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
            return tensor_equal(tensor.chunk(world_size, dim)[rank], shard)
        else:
            raise NotImplementedError
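

# A self-contained sketch (no distributed setup needed, since rank and
# world_size are passed explicitly): shard 2 of 4 along dim 1 must match the
# corresponding slice of the full tensor.
def _example_tensor_shard_equal():
    full = torch.arange(32, dtype=torch.float32).reshape(4, 8)
    shard = full.chunk(4, dim=1)[2]
    assert tensor_shard_equal(full, shard, rank=2, world_size=4)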