r"""
FMoE core layer
"""
import tree
import os
import torch
import torch.nn as nn

from .functions import prepare_forward, ensure_comm
from .functions import MOEScatter, MOEGather
from .functions import AllGather, Slice
from .gates import NaiveGate

from .fastermoe.config import switch_from_env


def mark_module_parallel_comm(module, comm):
    r"""
    Mark all parameters in `module` as doing data parallelism over the
    communicator `comm`, where `comm` may be one of `'world'`, `'dp'`, or
    `'none'`.
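    Example (a sketch): `mark_module_parallel_comm(expert, "dp")` tags each
    parameter with `p.dp_comm = "dp"`; a data-parallel wrapper such as
    FastMoE's `DistributedGroupedDataParallel` may read this attribute to
    pick the gradient-reduction group for that parameter.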
    """
    for p in module.parameters():
        setattr(p, "dp_comm", comm)


def _fmoe_general_global_forward(inp, gate, expert_fn, num_expert, world_size, **kwargs):
    r"""
    A private function that performs the following steps to complete the MoE
    computation.
    * Count the number of tokens from each worker to each expert.
    * Send the features to their target position so that input features to each
    expert are contiguous in memory.
    * Perform the forward computation of the experts using `expert_fn`.
    * Gather the output features of the experts back, and reorder them to the
    original order of the input tokens.
    Intermediate results like expert counts are hidden from users by this
    function.
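    Shape sketch (an assumption, based on how `FMoE.forward` calls this):
    `inp` is `(batch, d_model)` features; `gate` is a `(batch,)` or
    `(batch, top_k)` long tensor of global expert indices in
    `[0, num_expert * world_size)`; for a 2-D `gate`, the output has
    `batch * top_k` rows, which the caller reshapes to
    `(batch, top_k, d_model)`.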
    """
    (
        pos,
        local_expert_count,
        global_expert_count,
        fwd_expert_count,
        fwd_batch_size,
    ) = prepare_forward(gate, num_expert, world_size)
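    # `gate` is 1-D for top-1 routing or (batch, top_k) for top-k routing;
    # `pos` indexes the expanded stream of batch * topk token slots, so
    # integer-dividing it by `topk` maps each slot back to its source row.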
    topk = 1
    if len(gate.shape) == 2:
        topk = gate.shape[1]

    def scatter_func(tensor):
        return MOEScatter.apply(
            tensor,
            torch.div(pos, topk, rounding_mode='floor'),
            local_expert_count,
            global_expert_count,
            fwd_batch_size,
            world_size,
        )

    x = tree.map_structure(scatter_func, inp)

    x = expert_fn(x, fwd_expert_count)

    out_batch_size = tree.flatten(inp)[0].shape[0]
    if len(gate.shape) == 2:
        out_batch_size *= gate.shape[1]

    def gather_func(tensor):
        return MOEGather.apply(
            tensor,
            pos,
            local_expert_count,
            global_expert_count,
            out_batch_size,
            world_size,
        )

    outp = tree.map_structure(gather_func, x)
    return outp


fmoe_faster_schedule = False
if switch_from_env('FMOE_FASTER_SCHEDULE_ENABLE', False):
    fmoe_faster_schedule = True
    from .fastermoe.schedule import _fmoe_general_global_forward
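    # a usage note: the switch is driven by an environment variable, e.g.
    # running with `FMOE_FASTER_SCHEDULE_ENABLE=1` (assuming the switch
    # accepts `1` as truthy) replaces `_fmoe_general_global_forward` with
    # the overlapped schedule imported above.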


class FMoE(nn.Module):
    r"""
    A general MoE implementation that supports an arbitrary module as the
    expert.
    * `num_expert` stands for the number of experts on **each** worker.
    * `world_size` stands for the total number of workers that contain
    different experts.
    * `slice_group` can be a torch communication group, indicating that
    model parallelism is applied across the group: workers in the group hold
    the same copy of the input features and require the same copy of the
    output. Each worker computes the output of only a certain slice of the
    input batch, and the outputs are all-gathered after computation.
    * `top_k` stands for the number of experts each token is routed to.
    * `gate` is a gate class which can be found in `fmoe.gates`.
    * `expert` can be specified as a module class; it is used to generate
    `num_expert` expert modules.
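    Example (a minimal sketch; `MyExpert` is a hypothetical module class whose
    constructor takes `d_model`, and whose forward takes the expert input and
    a per-expert token-count tensor, matching how `expert_fn` calls it):

        moe = FMoE(num_expert=4, d_model=16, top_k=2, expert=MyExpert)
        output = moe(torch.randn(8, 16))  # output: (8, 16)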
    """

    def __init__(
        self,
        num_expert=32,
        d_model=1024,
        world_size=1,
        mp_group=None,  # being deprecated
        slice_group=None,
        moe_group=None,
        top_k=2,
        gate=NaiveGate,
        expert=None,
        gate_hook=None,
        mask=None,
        mask_dict=None,
    ):
        super().__init__()
        self.num_expert = num_expert
        self.d_model = d_model
        self.world_size = world_size

        self.slice_group = slice_group
        if mp_group is not None:
            print("[Warning] mp_group is being deprecated")
            self.slice_group = mp_group
        if self.slice_group is None:
            self.slice_size = 1
            self.slice_rank = 0
        else:
            self.slice_size = self.slice_group.size()
            self.slice_rank = self.slice_group.rank()

        self.top_k = top_k
        if isinstance(expert, list):
            self.experts = nn.ModuleList([e(d_model) for e in expert])
            self.experts_fused = False
            self.num_expert = num_expert = len(expert)
        elif expert is not None:
            self.experts = nn.ModuleList([expert(d_model) for _ in range(num_expert)])
            self.experts_fused = False
        else:
            self.experts_fused = True
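            # no local expert module was given: a subclass is expected to
            # assign a fused implementation to `self.experts` before
            # `forward` or `mark_parallel_comm` is called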

        self.gate = gate(d_model, num_expert, world_size, top_k)
        self.gate_hook = gate_hook
        self.mask = mask
        self.mask_dict = mask_dict
        self.moe_group = moe_group

    def expert_fn(self, inp, fwd_expert_count):
        r"""
        The default expert function which either calls the experts as a whole
        or as separate experts.
        """
        if self.experts_fused:
            return self.experts(inp, fwd_expert_count)
        if isinstance(fwd_expert_count, torch.Tensor):
            fwd_expert_count_cpu = fwd_expert_count.cpu().numpy()
        else:
            fwd_expert_count_cpu = fwd_expert_count
        outputs = []
        base_idx = 0
        for i in range(self.num_expert):
            batch_size = fwd_expert_count_cpu[i]
            inp_slice = inp[base_idx : base_idx + batch_size]
            outputs.append(
                self.experts[i](inp_slice, torch.tensor([fwd_expert_count[i]]))
            )
            base_idx += batch_size
        return torch.cat(outputs, dim=0)

    def expert_fn_single(self, inp, fwd_expert_count, idx):
        r"""
        Forward a single expert, used by the faster (smart) schedule.
        """
        assert not self.experts_fused, "should not use fused experts"
        output = self.experts[idx](inp, fwd_expert_count)
        return output

    def mark_parallel_comm(self, expert_dp_comm="none"):
        r"""
        Automatically mark the data-parallel communicators of the parameters
        within the module. This is typically called at the end of the
        `__init__` function in child classes.
        """
        if self.experts is not None:
            comm = expert_dp_comm
            if isinstance(self.experts, list):
                for e in self.experts:
                    mark_module_parallel_comm(e, comm)
            else:
                mark_module_parallel_comm(self.experts, comm)
        mark_module_parallel_comm(self.gate, "gate")

    def forward(self, moe_inp):
        r"""
        The FMoE module first computes the gate output, and then conducts the
        MoE forward pass according to the gate.  The score that the gate
        assigns to each selected expert is multiplied with that expert's
        output tensor as a weight.
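        Shape sketch (an assumption, for a single `(batch, d_model)` input):
        `gate_top_k_idx` and `gate_score` are both `(batch, top_k)`, and the
        returned tensor has the same `(batch, d_model)` shape as the input.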
        """

        moe_inp_batch_size = tree.flatten(
            tree.map_structure(lambda tensor: tensor.shape[0], moe_inp)
        )
        assert all(
            [batch_size == moe_inp_batch_size[0] for batch_size in moe_inp_batch_size]
        ), "MoE inputs must have the same batch size"

        if self.world_size > 1:

            def ensure_comm_func(tensor):
                ensure_comm(tensor, self.moe_group)

            tree.map_structure(ensure_comm_func, moe_inp)
        if self.slice_size > 1:

            def slice_func(tensor):
                return Slice.apply(
                    tensor, self.slice_rank, self.slice_size, self.slice_group
                )

            moe_inp = tree.map_structure(slice_func, moe_inp)

        gate_top_k_idx, gate_score = self.gate(moe_inp)

        if self.gate_hook is not None:
            self.gate_hook(gate_top_k_idx, gate_score, None)

        # delete masked tensors
        if self.mask is not None and self.mask_dict is not None:
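            # tokens with `mask == 0` are kept; tokens with a nonzero mask
            # value are dropped here and recovered from `mask_dict` after the
            # expert computation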
            # TODO: to fix
            mask = self.mask.view(-1)

            def delete_mask_func(tensor):
                # to: (BxL') x d_model
                tensor = tensor[mask == 0, :]
                return tensor

            moe_inp = tree.map_structure(delete_mask_func, moe_inp)
            gate_top_k_idx = gate_top_k_idx[mask == 0, :]

        fwd = _fmoe_general_global_forward(
            moe_inp,
            gate_top_k_idx,
            self.expert_fn_single if fmoe_faster_schedule else self.expert_fn,
            self.num_expert,
            self.world_size,
            experts=self.experts,
        )

        # recover deleted tensors
        if self.mask is not None and self.mask_dict is not None:

            def recover_func(tensor):
                # to: (BxL') x top_k x dim
                dim = tensor.shape[-1]
                tensor = tensor.view(-1, self.top_k, dim)
                # to: (BxL) x top_k x d_model
                x = torch.zeros(
                    mask.shape[0],
                    self.top_k,
                    dim,
                    device=tensor.device,
                    dtype=tensor.dtype,
                )
                # recover
                x[mask == 0] = tensor
                for k, v in self.mask_dict.items():
                    x[mask == k] = v
                return x

            moe_outp = tree.map_structure(recover_func, fwd)
        else:

            def view_func(tensor):
                dim = tensor.shape[-1]
                tensor = tensor.view(-1, self.top_k, dim)
                return tensor

            moe_outp = tree.map_structure(view_func, fwd)

        gate_score = gate_score.view(-1, 1, self.top_k)

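        # combine the top-k expert outputs of each token as a weighted sum:
        # (B, 1, top_k) @ (B, top_k, d) -> (B, 1, d) -> reshape to (B, d)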
        def bmm_func(tensor):
            dim = tensor.shape[-1]
            tensor = torch.bmm(gate_score, tensor).reshape(-1, dim)
            return tensor

        moe_outp = tree.map_structure(bmm_func, moe_outp)

        if self.slice_size > 1:

            def all_gather_func(tensor):
                return AllGather.apply(
                    tensor, self.slice_rank, self.slice_size, self.slice_group
                )

            moe_outp = tree.map_structure(all_gather_func, moe_outp)

        moe_outp_batch_size = tree.flatten(
            tree.map_structure(lambda tensor: tensor.shape[0], moe_outp)
        )
        assert all(
            [batch_size == moe_outp_batch_size[0] for batch_size in moe_outp_batch_size]
        ), "MoE outputs must have the same batch size"
        return moe_outp