r"""
The smart schedule proposed in FasterMoE.
"""
import torch
from torch.autograd.function import Function

from fmoe.functions import prepare_forward, ensure_comm
from fmoe.functions import _local_scatter, _local_gather 
import fmoe_cuda as fmoe_native
from fmoe.fastermoe import expert_utils
from .shadow_policy import get_shadow_policy


class MoEForward(Function):
    r"""Autograd function implementing FasterMoE's smart schedule.

    Communication / computation overlap is delegated to the native
    ``fmoe_native.smart_sch_forward`` / ``smart_sch_backward`` kernels.
    Expert computation and shadow (replicated) expert parameter management
    are performed through the Python callbacks defined below, so the
    autograd graph for each expert chunk is recorded manually in ``forward``
    and replayed explicitly in ``backward``.
    """

    @staticmethod
    def forward(
            ctx,
            expert_fn,
            experts,
            inp,
            pos_s, pos_g,
            local_expert_count, global_expert_count,
            stored_models,
            fwd_batch_size, out_batch_size,
            world_size):
        local_input_buf = _local_scatter(inp, pos_s)

        # Saved per-chunk expert inputs / outputs for the delayed backward
        # passes.  The native scheduler may invoke the expert callback up to
        # twice per rank (local experts + shadowed experts), hence 2x.
        ctx.gibs = [None] * (world_size * 2)
        ctx.gobs = [None] * (world_size * 2)

        def _expert_forward(x, y, idx):
            # Run the expert on buffer `x`, writing the result into `y`,
            # while recording an autograd graph for chunk `idx`.
            nothing = lambda a: a
            x = x.data
            with torch.enable_grad():
                x.requires_grad = True
                # Identity saved_tensors_hooks skip torch autograd's tensor
                # version check: the underlying buffers are mutated in-place
                # by the native scheduler between forward and backward.
                with torch.autograd.graph.saved_tensors_hooks(nothing, nothing):
                    y0 = expert_fn(x, [x.shape[0]])
            ctx.gibs[idx] = x
            ctx.gobs[idx] = y0
            y.copy_(y0)

        ctx.experts = experts
        # Expert parameter size is only needed when some experts are
        # shadowed (replicated) on other ranks.
        if stored_models.any():
            ctx.expert_size = expert_utils.get_expert_param_size(experts)
        else:
            ctx.expert_size = 0
        get_param_fn = lambda out: expert_utils.get_expert_params(experts, out)
        pop_fn = lambda: expert_utils.pop_expert_params(experts)
        ctx.shadows = [None] * world_size

        def stash_fn(params, idx):
            # Temporarily install a remote expert's parameters, remembering
            # them so backward can re-install them and reduce gradients.
            expert_utils.stash_expert_params(experts, params)
            ctx.shadows[idx] = params

        local_output_buf, gib = fmoe_native.smart_sch_forward(
                local_input_buf,
                local_expert_count, global_expert_count,
                stored_models, fwd_batch_size, ctx.expert_size,
                world_size, _expert_forward, get_param_fn, stash_fn, pop_fn)

        out = _local_gather(local_output_buf, pos_g, out_batch_size,
                maybe_overlap=False)

        # gib and local_input_buf are necessary, because ctx.gibs are created
        # based on their memory
        variables = (pos_s, pos_g, local_expert_count, global_expert_count,
                stored_models, gib, local_input_buf)

        ctx.moe_args = fwd_batch_size, inp.shape[0], world_size
        ctx.save_for_backward(*variables)

        return out

    @staticmethod
    def backward(ctx, grad_out):
        (pos_s, pos_g, local_expert_count, global_expert_count,
                stored_models, _1, _2) = ctx.saved_tensors
        (fwd_batch_size, inp_batch_size, world_size) = ctx.moe_args

        def _expert_backward(grad_y, grad_x, idx):
            # Replay the autograd graph recorded in forward for chunk `idx`,
            # writing the input gradient into the scheduler's buffer.
            y = ctx.gobs[idx]
            x = ctx.gibs[idx]
            torch.autograd.backward([y], [grad_y])
            grad_x.copy_(x.grad)

        experts = ctx.experts

        def stash_fn(idx):
            # Re-install the shadowed parameters saved during forward.
            expert_utils.stash_expert_params(experts, ctx.shadows[idx])

        pop_fn = lambda: expert_utils.pop_expert_params(experts)

        def collect_fn(idx, root):
            # Collect this rank's gradients for a shadowed expert and reduce
            # them onto the expert's owner rank `root`.
            grad = ctx.shadows[idx]
            expert_utils.collect_expert_grads(experts, grad)
            fmoe_native.reduce_grad(grad, root, ctx.expert_size)

        set_grad_fn = lambda idx: expert_utils.set_grads(experts, ctx.shadows[idx])

        grad_out_buf = _local_scatter(grad_out.contiguous(), pos_g)
        grad_in_buf = fmoe_native.smart_sch_backward(
                grad_out_buf,
                local_expert_count, global_expert_count,
                stored_models,
                pos_s.shape[0], fwd_batch_size,
                world_size,
                _expert_backward, stash_fn, pop_fn, collect_fn, set_grad_fn)
        grad_in = _local_gather(grad_in_buf, pos_s, inp_batch_size)

        # Gradient flows only to `inp` (3rd forward argument); all other
        # arguments are non-differentiable (callables, modules, index
        # tensors and plain sizes).
        return (None, None, grad_in, None, None, None, None, None, None, None, None)


# Lazily-initialized shadowing policy; built on first forward call because
# it needs the model dimension of the incoming batch.
policy_fn = None


def _fmoe_general_global_forward(inp, gate, expert_fn, n_expert, world_size, experts=None, stored_models=None):
    r"""Run the FasterMoE smart-scheduled global MoE forward pass.

    Scatters `inp` according to `gate`, decides which experts to shadow
    (replicate locally) via the shadow policy, and dispatches the fused
    communication/computation schedule through :class:`MoEForward`.

    Args:
        inp: input tensor of shape ``(batch, d_model)``.
        gate: expert assignment per token; 2-D ``(batch, topk)`` or 1-D.
        expert_fn: callable running the local expert(s) on a buffer.
        n_expert: number of experts per worker (currently must be 1).
        world_size: number of distributed workers.
        experts: expert modules, forwarded to the autograd function.
        stored_models: optional precomputed shadowing decision; computed by
            the policy when ``None``.

    Returns:
        The gathered output tensor of shape ``(batch * topk, d_model)``.
    """
    # TODO: Using multiple tensors as input is to be supported.
    assert isinstance(inp, torch.Tensor)
    # TODO: Support many experts on each process
    assert n_expert == 1
    (
        pos,
        local_expert_count,
        global_expert_count,
        fwd_expert_count,
        fwd_batch_size,
    ) = prepare_forward(gate, n_expert, world_size)

    global policy_fn
    if policy_fn is None:
        policy_fn = get_shadow_policy(d_model=inp.shape[-1])

    if stored_models is None:
        stored_models = policy_fn(local_expert_count, global_expert_count,
                n_expert, world_size)

    # A 2-D gate carries top-k assignments per token; otherwise top-1.
    topk = 1
    if len(gate.shape) == 2:
        topk = gate.shape[1]
    out_batch_size = inp.shape[0] * topk

    # pos indexes the scattered buffer; dividing by topk maps each slot
    # back to its source token for the input-side scatter.
    return MoEForward.apply(expert_fn, experts, inp,
            torch.div(pos, topk, rounding_mode='floor'), pos,
            local_expert_count, global_expert_count, stored_models,
            fwd_batch_size, out_batch_size, world_size)