r"""
The adaptor to seamlessly enable FastMoE in Megatron-LM v2.0 with at most two
lines of modification.
See `examples/megatron` for usage instructions.
"""
import os
import sys
import math
import random
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from .transformer import FMoETransformerMLP
from .distributed import DistributedGroupedDataParallel
from .balance import update_balance_profile, reset_balance_profile
from .utils import get_torch_default_comm


class _FakeMegatronMLP(nn.Module):
    r"""
    A fake MLP without model parallelism, used for correctness testing.
    """

    def __init__(self, args, _):
        super().__init__()
        self.fc1 = nn.Linear(args.hidden_size, args.hidden_hidden_size)
        self.fc2 = nn.Linear(args.hidden_hidden_size, args.hidden_size)

    def forward(self, x):
        r"""
        Directly use GeLU
        """
        x = self.fc1(x)
        x = F.gelu(x)
        x = self.fc2(x)
        return x, torch.zeros_like(x)


def _megatron_init_method(self, rng, sigma):
    r"""
    Init method based on N(0, sigma).
    Copied from Megatron-LM
    """
    device = self.weight.device
    dtype = self.weight.dtype
    weight = rng.normal(loc=0.0, scale=sigma, size=tuple(self.weight.size()))
    self.weight.data = torch.from_numpy(weight).to(dtype=dtype, device=device)

    if self.bias is not None:
        # Always initialize bias to zero.
        with torch.no_grad():
            self.bias.zero_()


def _random_init_weight(self, rng):
    r"""
    Copied from torch.nn.init.kaiming_uniform_
    """
    fan = nn.init._calculate_correct_fan(self.weight[0], "fan_in")
    gain = nn.init.calculate_gain("leaky_relu", math.sqrt(5))
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std
    device = self.weight.device
    dtype = self.weight.dtype
    weight = rng.uniform(-bound, bound, size=tuple(self.weight.size()))
    self.weight.data = torch.from_numpy(weight).to(dtype=dtype, device=device)

    if self.bias is not None:
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])
        bound = 1 / math.sqrt(fan_in)
        bias = rng.uniform(-bound, bound, size=tuple(self.bias.size()))
        self.bias.data = torch.from_numpy(bias).to(dtype=dtype, device=device)


balance_dict = {}
num_layers = 0


def reset_gate_hook():
    from megatron import get_args

    global balance_dict, num_layers
    reset_balance_profile(balance_dict, num_layers, get_args().balance_strategy)


def get_balance_profile():
    global balance_dict
    return balance_dict


def generate_megatron_gate_hook(layer_idx, num_expert_global):
    from megatron import get_args

    balance_strategy = get_args().balance_strategy

    def megatron_gate_hook(gate_top_k_idx, gate_score_top_k, gate_state_dict):
        global balance_dict
        update_balance_profile(
            balance_dict,
            gate_top_k_idx,
            gate_score_top_k,
            gate_state_dict,
            layer_idx,
            num_expert_global,
            balance_strategy,
        )

    return megatron_gate_hook
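
# Note: the hook returned above is passed as `gate_hook` to FMoETransformerMLP
# (see MegatronMLP below). It runs on each forward pass of the gate and
# accumulates per-layer balance statistics into the module-level `balance_dict`,
# which `add_balance_log` all-reduces, writes to the summary writer, and resets.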


def add_fmoe_args(parser):
    group = parser.add_argument_group(title="fastmoe")

    group.add_argument("--fmoefy", action="store_true")
    group.add_argument("--num-experts", type=int, default=None)
    group.add_argument("--top-k", type=int, default=2)
    group.add_argument("--balance-loss-weight", type=float, default=1)
    group.add_argument("--balance-strategy", type=str, default=None)

    return parser
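
# Illustrative sketch (assuming Megatron-LM v2.0's `pretrain` accepts an
# `extra_args_provider`): pass this function when launching training so the
# flags above become available on the command line, e.g.
#
#     pretrain(..., extra_args_provider=add_fmoe_args)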


def add_balance_log(writer, iteration):
    from megatron import is_last_rank

    balance_dict_tensor = torch.vstack(
        [torch.tensor(item, device=item[0].device) for item in balance_dict.values()]
    ).detach()
    world_group = get_torch_default_comm()
    world_size = torch.distributed.get_world_size(group=world_group)
    torch.distributed.all_reduce(balance_dict_tensor, group=world_group)
    balance_dict_tensor /= world_size

    if writer and is_last_rank():
        for idx, metric_name in enumerate(balance_dict):
            for layer_id, val in enumerate(balance_dict_tensor[idx]):
                writer.add_scalar(
                    f"balance-{metric_name}/layer-{layer_id}", val.item(), iteration
                )
            writer.add_scalar(
                f"balance-{metric_name}/all",
                balance_dict_tensor[idx].mean().item(),
                iteration,
            )

    reset_gate_hook()


def patch_forward_step(forward_step_func):
    r"""
    Patch model's forward_step_func to support balance loss
    """

    from megatron.mpu import is_pipeline_last_stage
    from megatron import get_args

    if not get_args().balance_strategy:
        return forward_step_func

    def forward_step_with_balance_loss(data_iterator, model, input_tensor):
        args = get_args()
        output = forward_step_func(data_iterator, model, input_tensor)

        if is_pipeline_last_stage():
            loss_name = args.balance_strategy + "_loss"

            loss, state_dict = output
            bal_loss = (
                torch.tensor(
                    balance_dict[loss_name],
                    device=balance_dict[loss_name][0].device,
                ).mean()
                * args.balance_loss_weight
            ).float()

            # average across the world group
            world_group = get_torch_default_comm()
            world_size = torch.distributed.get_world_size(group=world_group)
            averaged_bal_loss = bal_loss.clone().detach()
            torch.distributed.all_reduce(averaged_bal_loss, group=world_group)
            averaged_bal_loss /= world_size

            loss += bal_loss
            state_dict[loss_name] = averaged_bal_loss

            return loss, state_dict
        else:
            return output

    return forward_step_with_balance_loss


def patch_model_provider(model_provider):
    from megatron import get_args

    def fmoefied_model_provider():
        args = get_args()
        return fmoefy(
            model_provider(),
            num_experts=args.num_experts,
            hidden_hidden_size=4 * args.hidden_size // args.top_k,
            top_k=args.top_k,
        )

    return fmoefied_model_provider
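
# Illustrative sketch of how the two patch helpers above could be combined in a
# Megatron pretraining script (the provider/step names are placeholders from the
# user's own script):
#
#     pretrain(
#         train_valid_test_datasets_provider,
#         patch_model_provider(model_provider),
#         patch_forward_step(forward_step),
#         extra_args_provider=add_fmoe_args,
#     )
#
# `patch_model_provider` fmoefies the model after it is built, and
# `patch_forward_step` adds the (all-reduced) balance loss to the training loss
# on the last pipeline stage.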


class MegatronMLP(FMoETransformerMLP):
    r"""
    An FMoETransformerMLP layer that distributes experts across the
    communication group `group`, replacing the original MLP layer in Megatron.
    """

    def __init__(self, args, group, layer_idx):
        assert (
            args.seq_length * args.micro_batch_size % args.tensor_model_parallel_size
            == 0
        ), "Batch size x sequence length should be a multiple of mp size"
        if not args.distributed_experts:
            world_size = 1
        else:
            world_size = args.world_size
        gate = None
        if not args.balance_strategy or args.balance_strategy == "gshard":
            from .gates import NaiveGate

            gate = NaiveGate
        elif args.balance_strategy == "noisy":
            from .gates import NoisyGate

            gate = NoisyGate
        else:
            assert False, "Undefined balance strategy {}".format(args.balance_strategy)
        super().__init__(
            args.num_experts,
            top_k=args.top_k,
            d_model=args.hidden_size,
            d_hidden=args.hidden_hidden_size,
            world_size=world_size,
            mp_group=group,
            expert_dp_comm="none" if args.distributed_experts else "dp",
            gate_hook=generate_megatron_gate_hook(
                layer_idx, args.num_experts * world_size
            ),
            gate=gate,
        )
        self.hidden_size = args.hidden_size
        if args.distributed_experts:
            self.rank = args.rank
        else:
            self.rank = 0
        self.sigma = args.init_method_std
        self.num_layers = args.num_layers
        self.reset_parameters()

    def reset_parameters(self):
        r"""
        Initialize the weights in the same way as Megatron initializes its
        linear layers. Since Megatron uses a fixed random seed internally, an
        additional numpy rng is used here.
        """
        rng = np.random.default_rng(np.random.randint(2048) + self.rank)
        _megatron_init_method(self.experts.htoh4, rng, self.sigma)
        std = self.sigma / math.sqrt(2.0 * self.num_layers)
        _megatron_init_method(self.experts.h4toh, rng, std)

    def forward(self, inp):
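        # Megatron's MLP is expected to return an (output, bias) pair; the MoE
        # experts already apply their bias internally, so a zero bias
        # placeholder is returned here to keep the interface compatible (same
        # trick as _FakeMegatronMLP above).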
        return (
            super().forward(inp),
            torch.zeros(self.hidden_size, dtype=inp.dtype, device=inp.device),
        )


def fmoefy(
    model,
    num_experts=None,
    distributed_experts=True,
    hidden_hidden_size=None,
    top_k=None,
):
    r"""
    Replace the MLP layers in a Megatron transformer-based model with MoE layers.
    * `model` should be a standard Megatron model that has
    `model.language_model.transformer.layers` as its transformer layers, i.e. an
    array of transformer blocks that each contain an `mlp` member.
    * `distributed_experts` is set to True if different experts are located on
    different workers. Otherwise, the experts on all workers are identical, and
    they are trained in data-parallel mode. This can be useful when testing on
    small models that do not require high training throughput or large parameter
    capacity.
    Note that pipeline parallelism is not supported yet. When distributed experts
    are enabled, their communicator should be Megatron's
    tensor_model_parallel_comm x data_parallel_comm, which is not created.
    """
    from megatron import get_args
    from megatron import mpu

    args = get_args()
    if num_experts is not None:
        args.num_experts = num_experts
    assert (
        "num_experts" in args
    ), "num_experts should be specified in arguments or fmoefy function"

    if hidden_hidden_size is not None:
        args.hidden_hidden_size = hidden_hidden_size
    elif not hasattr(args, "hidden_hidden_size"):
        args.hidden_hidden_size = args.hidden_size * 4

    if top_k is not None:
        args.top_k = top_k
    elif not hasattr(args, "top_k"):
        args.top_k = 2

    # Pass distributed_experts=None to keep the default setting from args
    if distributed_experts is not None:
        args.distributed_experts = distributed_experts

    for idx, l in enumerate(model.language_model.transformer.layers):
        l.mlp = MegatronMLP(args, mpu.get_model_parallel_group(), idx)

    # initialize gate hook
    global num_layers, balance_dict
    num_layers = len(model.language_model.transformer.layers)
    reset_gate_hook()

    return model
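
# Illustrative usage (the argument values are placeholders): for single-process
# debugging on a small model, experts can be kept identical across workers by
# disabling distributed experts, e.g.
#
#     model = fmoefy(model, num_experts=2, distributed_experts=False)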


class DistributedDataParallel(DistributedGroupedDataParallel):
    r"""
    A wrapper that replaces the DDP module provided by Megatron, adapted to
    enable the sophisticated parallel and reduction strategies in FastMoE.
    """

    def __init__(self, module):
        from megatron import mpu

        super().__init__(
            module,
            mp_group=mpu.get_model_parallel_group(),
            dp_group=mpu.get_data_parallel_group(),
        )

    def state_dict(self, *args, **kwargs):
        r"""
        Keep consistency with Megatron
        """
        return self.module.state_dict(*args, **kwargs)

    def state_dict_for_save_checkpoint(self, *args, **kwargs):
        r"""
        Keep consistency with Megatron
        """
        return self.module.state_dict_for_save_checkpoint(*args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        r"""
        Keep consistency with Megatron
        """
        return self.module.load_state_dict(*args, **kwargs)


def get_fmoe_checkpoint_name(
    checkpoints_path, iteration, release=False, data_parallel_rank=-1
):
    """A unified checkpoint name that allows specifying a data parallel rank"""
    from megatron import mpu
    from megatron.checkpointing import get_checkpoint_name

    if data_parallel_rank == -1:
        data_parallel_rank = mpu.get_data_parallel_rank()
    if data_parallel_rank == 0:
        return get_checkpoint_name(checkpoints_path, iteration, release)

    if release:
        directory = "release"
    else:
        directory = "iter_{:07d}".format(iteration)
    # Use both the tensor and pipeline MP rank.
    if mpu.get_pipeline_model_parallel_world_size() == 1:
        return os.path.join(
            checkpoints_path,
            directory,
            "mp_rank_{:02d}_dp_rank_{:04d}".format(
                mpu.get_tensor_model_parallel_rank(), data_parallel_rank
            ),
            "model_optim_rng.pt",
        )
    return os.path.join(
        checkpoints_path,
        directory,
        "mp_rank_{:02d}_{:03d}_dp_rank_{:04d}".format(
            mpu.get_tensor_model_parallel_rank(),
            mpu.get_pipeline_model_parallel_rank(),
            data_parallel_rank,
        ),
        "model_optim_rng.pt",
    )
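
# For example, with the naming scheme above, iteration 1000 on tensor-parallel
# rank 0 and data-parallel rank 3 (no pipeline parallelism) resolves to
#     <checkpoints_path>/iter_0001000/mp_rank_00_dp_rank_0003/model_optim_rng.pt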


def save_checkpoint(iteration, model, optimizer, lr_scheduler):
    """Save a model checkpoint with expert parallelism."""
    # TODO: update patch
    from megatron import get_args
    from megatron import mpu
    from megatron import print_rank_last

    expert_dp_comm = "none"

    if mpu.get_data_parallel_rank() == 0:
        # at dp rank 0, we still follow the native save_checkpoint of megatron
        from megatron.checkpointing import save_checkpoint as save_checkpoint_native

        save_checkpoint_native(iteration, model, optimizer, lr_scheduler)
        return

    args = get_args()

    # Unwrap the (fmoe) DistributedDataParallel wrapper before saving.
    if isinstance(model, DistributedDataParallel):
        model = model.module

    print_rank_last(
        "saving checkpoint at iteration {:7d} to {}".format(iteration, args.save)
    )

    # Arguments, iteration, and model.
    state_dict = {}
    state_dict["model"] = model.state_dict_for_save_checkpoint(
        keep_vars=(mpu.get_data_parallel_rank() > 0)
    )

    def extract_expert_param(state_dict, expert_dp_comm="none"):
        state_dict_new = state_dict.__class__()
        for k, v in state_dict.items():
            # megatron uses both dict and OrderedDict in its state_dict
            if isinstance(v, (OrderedDict, dict)):
                v_new = extract_expert_param(v, expert_dp_comm)
                if len(v_new) > 0:
                    state_dict_new[k] = v_new
            elif hasattr(v, "dp_comm") and v.dp_comm == expert_dp_comm:
                state_dict_new[k] = v.detach()
        return state_dict_new

    state_dict["model"] = extract_expert_param(state_dict["model"], expert_dp_comm)

    # Optimizer stuff.
    if not args.no_save_optim:
        if optimizer is not None:
            state_dict["optimizer"] = optimizer.state_dict()
            param_global_idx = 0
            for param_group in optimizer.optimizer.param_groups:
                for param in param_group["params"]:
                    if not (
                        hasattr(param, "dp_comm") and param.dp_comm == expert_dp_comm
                    ):
                        # this parameter is not an expert parameter
                        # thus there is no need to save its state in current rank
                        # since it has been saved by data parallel rank 0
                        if args.fp16:
                            # fp16 optimizer may have empty state due to overflow
                            state_dict["optimizer"]["optimizer"]["state"].pop(
                                param_global_idx, None
                            )
                        else:
                            state_dict["optimizer"]["state"].pop(param_global_idx)
                    param_global_idx += 1
            if args.fp16:
                state_dict["optimizer"]["optimizer"].pop("param_groups")
                # fp32_from_fp16_params in state_dict is not a copy
                # but a reference to optimizer.fp32_from_fp16_params,
                # changing it in state_dict will change
                # optimizer.fp32_from_fp16_params as well
                # thus we create an empty fp32_from_fp16_params in state_dict
                # and only insert expert parameters.
                fp32_from_fp16_params = state_dict["optimizer"]["fp32_from_fp16_params"]
                state_dict["optimizer"]["fp32_from_fp16_params"] = []
                for param_group in fp32_from_fp16_params:
                    param_group_copy = []
                    for param in param_group:
                        param_copy = (
                            param
                            if hasattr(param, "dp_comm")
                            and param.dp_comm == expert_dp_comm
                            else None
                        )
                        param_group_copy.append(param_copy)
                    state_dict["optimizer"]["fp32_from_fp16_params"].append(
                        param_group_copy
                    )
            else:
                state_dict["optimizer"].pop("param_groups")

    # Save.
    checkpoint_name = get_fmoe_checkpoint_name(args.save, iteration)
    from megatron.checkpointing import ensure_directory_exists
    from megatron.checkpointing import get_checkpoint_tracker_filename

    ensure_directory_exists(checkpoint_name)
    torch.save(state_dict, checkpoint_name)

    # Wait so everyone is done (necessary)
    torch.distributed.barrier()
    if torch.distributed.get_rank() == 0:
        print(
            "  successfully saved checkpoint at iteration {:7d} to {}".format(
                iteration, args.save
            ),
            flush=True,
        )
    # And update the latest iteration
    if torch.distributed.get_rank() == 0:
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, "w") as f:
            f.write(str(iteration))
    # Wait so everyone is done (not necessary)
    torch.distributed.barrier()


def merge_state_dict(state_dict_rank0, state_dict_local, fp16):
    """Merge two state dicts: one from data parallel rank 0,
    the other containing only expert states."""
    from megatron import print_rank_last

    def merge_model(state_dict_rank0, state_dict_local):
        for k, v in state_dict_local.items():
            # megatron uses both dict and OrderedDict in its state_dict
            if isinstance(v, (OrderedDict, dict)):
                print_rank_last("[merge model] go recursively to {}".format(k))
                merge_model(state_dict_rank0[k], v)
            else:
                state_dict_rank0[k] = v

    merge_model(state_dict_rank0["model"], state_dict_local["model"])

    optimizer_rank0 = (
        state_dict_rank0["optimizer"]["optimizer"]
        if fp16
        else state_dict_rank0["optimizer"]
    )
    optimizer_local = (
        state_dict_local["optimizer"]["optimizer"]
        if fp16
        else state_dict_local["optimizer"]
    )

    for k, v in optimizer_local["state"].items():
        optimizer_rank0["state"][k] = v

    if fp16:
        for group_idx, param_group in enumerate(
            state_dict_local["optimizer"]["fp32_from_fp16_params"]
        ):
            for param_in_group_idx, param in enumerate(param_group):
                if param is not None:
                    state_dict_rank0["optimizer"]["fp32_from_fp16_params"][group_idx][
                        param_in_group_idx
                    ] = param
                    print_rank_last(
                        "[merge fp32_from_fp16_params] copy parameter ({:d}, {:d})".format(
                            group_idx, param_in_group_idx
                        )
                    )

    return state_dict_rank0


def load_checkpoint(model, optimizer, lr_scheduler, load_arg="load"):
    """Load a model checkpoint and return the iteration."""

    from megatron import get_args
    from megatron import mpu
    from megatron import print_rank_last
    from megatron.checkpointing import get_checkpoint_tracker_filename
    from megatron.checkpointing import set_checkpoint_version
    from megatron.checkpointing import check_checkpoint_args
    from megatron.checkpointing import update_num_microbatches

    if mpu.get_data_parallel_rank() == 0:
        # at dp rank 0, we still follow the native load_checkpoint by megatron
        from megatron.checkpointing import load_checkpoint as load_checkpoint_native

        return load_checkpoint_native(model, optimizer, lr_scheduler, load_arg)

    args = get_args()
    load_dir = getattr(args, load_arg)

    if isinstance(model, DistributedDataParallel):
        model = model.module
    # Read the tracker file and set the iteration.
    tracker_filename = get_checkpoint_tracker_filename(load_dir)

    # If no tracker file, return iteration zero.
    if not os.path.isfile(tracker_filename):
        print_rank_last(
            "WARNING: could not find the metadata file {} ".format(tracker_filename)
        )
        print_rank_last(
            "    will not load any checkpoints and will start from random"
        )
        return 0

    # Otherwise, read the tracker file and either set the iteration or
    # mark it as a release checkpoint.
    iteration = 0
    release = False
    with open(tracker_filename, "r") as f:
        metastring = f.read().strip()
        try:
            iteration = int(metastring)
        except ValueError:
            release = metastring == "release"
            if not release:
                print_rank_last(
                    "ERROR: Invalid metadata file {}. Exiting".format(tracker_filename)
                )
                sys.exit()

    assert iteration > 0 or release, "error parsing metadata file {}".format(
        tracker_filename
    )

    # Checkpoint.
    checkpoint_name_rank0 = get_fmoe_checkpoint_name(load_dir, iteration, release, 0)
    checkpoint_name_local = get_fmoe_checkpoint_name(
        load_dir, iteration, release, mpu.get_data_parallel_rank()
    )
    print_rank_last(
        " loading checkpoint at rank 0 from {} and rank {} from {} at iteration {}, will merge them later".format(
            checkpoint_name_rank0,
            mpu.get_data_parallel_rank(),
            checkpoint_name_local,
            iteration,
        )
    )

    # Load the checkpoint.
    def load_state_dict(checkpoint_name):
        try:
            state_dict = torch.load(checkpoint_name, map_location="cpu")
        except ModuleNotFoundError:
            from megatron.fp16_deprecated import loss_scaler

            # For backward compatibility.
            print_rank_last(" > deserializing using the old code structure ...")
            sys.modules["fp16.loss_scaler"] = sys.modules[
                "megatron.fp16_deprecated.loss_scaler"
            ]
            sys.modules["megatron.fp16.loss_scaler"] = sys.modules[
                "megatron.fp16_deprecated.loss_scaler"
            ]
            state_dict = torch.load(checkpoint_name, map_location="cpu")
            sys.modules.pop("fp16.loss_scaler", None)
            sys.modules.pop("megatron.fp16.loss_scaler", None)
        except BaseException:
            print_rank_last("could not load the checkpoint")
            sys.exit()
        return state_dict

    state_dict_rank0 = load_state_dict(checkpoint_name_rank0)
    state_dict_local = load_state_dict(checkpoint_name_local)

    state_dict = merge_state_dict(state_dict_rank0, state_dict_local, args.fp16)

    # set checkpoint version
    set_checkpoint_version(state_dict.get("checkpoint_version", 0))

    # Set iteration.
    if args.finetune or release:
        iteration = 0
    else:
        try:
            iteration = state_dict["iteration"]
        except KeyError:
            try:  # Backward compatible with older checkpoints
                iteration = state_dict["total_iters"]
            except KeyError:
                print_rank_last(
                    "A metadata file exists but unable to load "
                    "iteration from checkpoint {}, exiting".format(
                        checkpoint_name_local
                    )
                )
                sys.exit()

    # Check arguments.
    assert args.consumed_train_samples == 0
    assert args.consumed_valid_samples == 0
    if "args" in state_dict:
        checkpoint_args = state_dict["args"]
        check_checkpoint_args(checkpoint_args)
        args.consumed_train_samples = getattr(
            checkpoint_args, "consumed_train_samples", 0
        )
        update_num_microbatches(consumed_samples=args.consumed_train_samples)
        args.consumed_valid_samples = getattr(
            checkpoint_args, "consumed_valid_samples", 0
        )
    else:
        print_rank_last("could not find arguments in the checkpoint ...")

    # Model.
    model.load_state_dict(state_dict["model"])

    # Optimizer.
    if not release and not args.finetune and not args.no_load_optim:
        try:
            if optimizer is not None:
                optimizer.load_state_dict(state_dict["optimizer"])
            if lr_scheduler is not None:
                lr_scheduler.load_state_dict(state_dict["lr_scheduler"])
        except KeyError:
            print_rank_last(
                "Unable to load optimizer from checkpoint {}. "
                "Specify --no-load-optim or --finetune to prevent "
                "attempting to load the optimizer state, "
                "exiting ...".format(checkpoint_name_local)
            )
            sys.exit()

    # rng states.
    if not release and not args.finetune and not args.no_load_rng:
        try:
            random.setstate(state_dict["random_rng_state"])
            np.random.set_state(state_dict["np_rng_state"])
            torch.set_rng_state(state_dict["torch_rng_state"])
            torch.cuda.set_rng_state(state_dict["cuda_rng_state"])
            mpu.get_cuda_rng_tracker().set_states(state_dict["rng_tracker_states"])
        except KeyError:
            print_rank_last(
                "Unable to load rng state from checkpoint {}. "
                "Specify --no-load-rng or --finetune to prevent "
                "attempting to load the rng state, "
                "exiting ...".format(checkpoint_name_local)
            )
            sys.exit()

    torch.distributed.barrier()
    print_rank_last(
        "  successfully loaded checkpoint (with expert parametes updated) from {} at iteration {}".format(
            args.load, iteration
        )
    )

    return iteration