test_embedding_tp.py
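"""Distributed test for tensor-parallel embedding lookup.

Checks that F.embedding with a 1D row- or column-sharded ColoTensor weight
produces the same output and (shard-wise) gradients as a plain torch.nn.Embedding.
"""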
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
from torch.nn import functional as F

import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.tensor import ColoTensor, ComputePattern, ComputeSpec, DistSpecManager, TensorSpec, distspec
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port

from _utils import tensor_equal, tensor_shard_equal


def init_1d_row(weight):
    # Shard the embedding weight along dim 0 (the vocabulary dimension) across the 1D tensor-parallel group.
    spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
        ComputeSpec(ComputePattern.TP1D))
    with DistSpecManager.no_grad():
        weight.set_tensor_spec(spec)


def init_1d_col(weight):
    # Shard the embedding weight along dim -1 (the embedding dimension) across the 1D tensor-parallel group.
    spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
        ComputeSpec(ComputePattern.TP1D))
    with DistSpecManager.no_grad():
        weight.set_tensor_spec(spec)


def run_with_spec(spec_init_func):
    model = torch.nn.Embedding(12, 32).cuda()
    weight = ColoTensor(torch.nn.Parameter(model.weight.detach()))
    spec_init_func(weight)
    x = torch.tensor((0, 3, 6, 9)).cuda()
    out = model(x)
    colo_out = F.embedding(x, weight)
    # The sharded forward pass must match the unsharded reference.
    assert tensor_equal(out, colo_out)
    grad = torch.rand_like(out)
    out.backward(grad)
    colo_out.backward(grad)
    # Each rank only holds a shard of the weight gradient, so compare shard-wise.
    assert tensor_shard_equal(model.weight.grad, weight.grad)


def run_dist(rank, world_size, port):
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    # Exercise both row- and column-sharded weight specs on every rank.
    run_with_spec(init_1d_row)
    run_with_spec(init_1d_col)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_embedding_1d(world_size):
    # Spawn one process per rank; mp.spawn passes the rank as the first argument to run_dist.
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_embedding_1d(4)