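"""Distributed tests for the ColossalAI MoE kernels.

Each worker compares the fused dispatch/combine kernel against a plain
matrix-multiplication implementation, checking both forward outputs and
backward gradients under fp32 and fp16.
"""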
from functools import partial
import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top1Router, Top2Router, MoeLayer, Experts
from colossalai.context.moe_context import MOE_CONTEXT

BATCH_SIZE = 16
NUM_EXPERTS = 4
CONFIG = dict()


def check_equal(tensor_a, tensor_b, atol=1e-06):
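    """Assert that two tensors match element-wise within an absolute tolerance (rtol is fixed at 0)."""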
    assert torch.allclose(tensor_a, tensor_b, rtol=0, atol=atol)


def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.float32, router=Top2Router):
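    """Run one MoE forward/backward with plain matmul dispatch, then repeat it
    with the fused kernel, and assert that outputs and gradients match."""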
    # disable TF32 here, since it introduces absolute error in the results
    torch.backends.cuda.matmul.allow_tf32 = False

    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    local_rank = gpc.get_local_rank(ParallelMode.GLOBAL)

    MOE_CONTEXT.setup(42)    # MOE environment initialization
    MOE_CONTEXT.reset_loss()
    torch.manual_seed(rs + local_rank)    # give each process a different random seed

    # get randomized data
    tokens = torch.randn(BATCH_SIZE, hidden_size, dtype=data_type, device=get_current_device(), requires_grad=True)

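    # build NUM_EXPERTS identical linear experts and wrap them in a MoeLayer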
    expert_module = nn.Linear
    expert_factor = dict(in_features=hidden_size, out_features=hidden_size, device=get_current_device())
    expert = Experts(expert_module, NUM_EXPERTS, **expert_factor)
    layer = MoeLayer(hidden_size, NUM_EXPERTS, router(capacity_factor_train=1.0), expert)
    if data_type == torch.float16:
        layer = layer.half()

    # use matrix multiplication instead of COL_MOE_KERNEL in MoE dispatch and combine
    layer.use_kernel = False
    old_out = layer(tokens)
    grad = torch.randn(old_out.shape, device=get_current_device())
    old_out.backward(grad)    # get gradient

    # save all results
    o_tk_grad = tokens.grad.data.clone()
    o_gt_grad = layer.gate.weight.grad.data.clone()

    # reset all gradients
    tokens.grad.zero_()
    layer.gate.weight.grad.zero_()

    layer.use_kernel = True
    new_out = layer(tokens)    # get outputs through the colossal kernel

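    # fp16 accumulates more rounding error, so compare with a looser absolute tolerance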
    if data_type == torch.float32:
        check_equal(old_out, new_out)
    else:
        check_equal(old_out, new_out, 1e-2)
    # forward function passed

    new_out.backward(grad)    # get gradients through the colossal kernel
    n_tk_grad = tokens.grad.data.clone()
    n_gt_grad = layer.gate.weight.grad.data.clone()

    if data_type == torch.float32:
        check_equal(o_tk_grad, n_tk_grad)
    else:
        check_equal(o_tk_grad, n_tk_grad, 1e-2)
    # token gradient is correct

    if data_type == torch.float32:
        check_equal(o_gt_grad, n_gt_grad, 5e-05)
    else:
        check_equal(o_gt_grad, n_gt_grad, 2e-01)
    # gate weight gradient is correct


@pytest.mark.dist
@pytest.mark.parametrize("rs", [131])
@pytest.mark.parametrize("hidden_size", [32, 144])
@pytest.mark.parametrize("data_type", [torch.float32, torch.float16])
@pytest.mark.parametrize("router", [Top1Router, Top2Router])
def test_moe_kernel(rs, hidden_size, data_type, router):
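    """Launch world_size worker processes and check kernel/matmul parity for the given router and dtype."""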
    world_size = 4
    run_func = partial(run_routing,
                       world_size=world_size,
                       port=free_port(),
                       rs=rs,
                       hidden_size=hidden_size,
                       data_type=data_type,
                       router=router)
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_kernel(2, 256, torch.float16, Top2Router)