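"""1D tensor-parallel test for the embedding op.

Checks that F.embedding on a row- or column-sharded ColoTensor weight matches
a plain torch.nn.Embedding in both the forward output and the gradient of the
local weight shard within the tensor-parallel group.
"""
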
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
from torch.nn import functional as F

import colossalai
from colossalai.tensor import ColoTensor, ColoTensorSpec, ProcessGroup
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_tensor.common_utils import split_param_col_tp1d, split_param_row_tp1d, tensor_equal, tensor_shard_equal


def run_with_spec(spec_init_func, pg: ProcessGroup):
    # reference dense embedding; its weight is wrapped as a ColoTensor for the sharded path
    model = torch.nn.Embedding(12, 32).cuda()
    weight = ColoTensor(torch.nn.Parameter(model.weight.detach()), ColoTensorSpec(pg))

    # shard the ColoTensor weight (row-wise or column-wise 1D) across the TP group
    spec_init_func(weight, pg)

    # forward: sharded F.embedding must match the dense reference
    x = torch.tensor((0, 3, 6, 9)).cuda()
    out = model(x)
    colo_out = F.embedding(x, weight)
    assert tensor_equal(out, colo_out)

    # backward with the same upstream gradient
    grad = torch.rand_like(out)
    out.backward(grad)
    colo_out.backward(grad)

    # compare grad inside a TP group
    assert tensor_shard_equal(model.weight.grad, weight.grad, pg.tp_local_rank(), pg.tp_world_size())


def run_dist(rank, world_size, port):
    # config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    pg = ProcessGroup(tp_degree=world_size)

    # exercise both 1D sharding layouts of the embedding weight
    run_with_spec(split_param_row_tp1d, pg)
    run_with_spec(split_param_col_tp1d, pg)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_embedding_1d(world_size):
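    # spawn one process per rank; free_port() picks an unused port for the launch rendezvous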
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
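    # allow running the 4-GPU case directly without pytest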
    test_embedding_1d(4)