pipe.py 20.1 KB
Newer Older
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
1
2
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

Tom Birch's avatar
Tom Birch committed
3
import argparse
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
4
import math
Tom Birch's avatar
Tom Birch committed
5
import os
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
6
import time
Tom Birch's avatar
Tom Birch committed
7
import warnings
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
8

Tom Birch's avatar
Tom Birch committed
9
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
10
import torch
Tom Birch's avatar
Tom Birch committed
11
12
from torch.distributed import rpc
import torch.multiprocessing as mp
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
13
import torch.nn as nn
Tom Birch's avatar
Tom Birch committed
14
from torch.utils.data import DataLoader
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
15
16
17
import torchtext
from torchtext.data.utils import get_tokenizer

18
from fairscale.nn import Pipe
Tom Birch's avatar
Tom Birch committed
19
20
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.pipe import pipe
Jun Ru Anderson's avatar
Jun Ru Anderson committed
21
from fairscale.optim import GradScaler
Tom Birch's avatar
Tom Birch committed
22
from tests.nn.model_parallel.commons import dist_init, get_worker_map
23

Jun Ru Anderson's avatar
Jun Ru Anderson committed
24
try:
Tom Birch's avatar
Tom Birch committed
25
    from fairscale.optim import Adam  # type: ignore
Jun Ru Anderson's avatar
Jun Ru Anderson committed
26
27
28
29
30
31
32

    can_benchmark = True
except ImportError:
    from torch.optim import Adam  # type: ignore

    can_benchmark = False

33

Tom Birch's avatar
Tom Birch committed
34
35
36
37
38
39
40
41
42
43
44
45
def init_random_seed(seed: int):
    """Seed torch (CPU and CUDA) and numpy RNGs for reproducible benchmark runs."""
    import numpy

    for seeder in (torch.manual_seed, torch.cuda.manual_seed, numpy.random.seed):
        seeder(seed)


# Number of micro-batches per batch for Pipe.
# NOTE(review): not referenced in the visible code of this file — confirm it is
# consumed by an importer before removing.
PIPE_CHUNKS = 2
# Global forward-pass counter incremented in TransformerDecoderLayer.forward;
# used only by the commented-out dump_cuda_tensors debugging hook there.
iteration_count = 0


46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
class EmbeddingLayer(nn.Embedding):
    """Token embedding whose output is scaled by sqrt(embedding dim)."""

    def __init__(self, ntoken, ninp, initrange):
        super().__init__(ntoken, ninp)
        self.ninp = ninp
        # Uniform init in [-initrange, initrange].
        nn.init.uniform_(self.weight, -initrange, initrange)

    def forward(self, src):
        scale = math.sqrt(self.ninp)
        return super().forward(src) * scale


class PositionalEncodingLayer(nn.Module):
    """Adds fixed sinusoidal positional encodings, followed by dropout.

    The table is precomputed for max_len positions and stored as a
    non-trainable buffer of shape (max_len, 1, d_model) so it broadcasts over
    the batch dimension of (seq_len, batch, d_model) inputs.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncodingLayer, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        self.register_buffer("pe", table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # Add the encoding for the first x.size(0) positions, then dropout.
        return self.dropout(x + self.pe[: x.size(0), :])


class TransformerDecoderLayer(nn.TransformerEncoderLayer):
    """Though this class inherits from torch.nn.TransformerEncoderLayer,
    it functions as a decoder in this model (a causal mask is applied)."""

    # NOTE(review): "droupout" is a typo for "dropout"; the name is kept so any
    # keyword-argument callers keep working.
    def __init__(self, ninp, nhead, nhid, droupout):
        super().__init__(ninp, nhead, nhid, droupout)
        self.src_mask = None  # lazily built causal mask, cached per sequence length

    def _generate_square_subsequent_mask(self, sz):
        # Causal mask: position i may attend to positions <= i. Allowed entries
        # are 0.0, disallowed entries are -inf (added to attention logits).
        allowed = torch.tril(torch.ones(sz, sz)) == 1
        return torch.zeros(sz, sz).masked_fill(~allowed, float("-inf"))

    def forward(self, src):
        global iteration_count
        iteration_count += 1
        # if iteration_count == 196:
        #    dump_cuda_tensors()

        # Rebuild the cached mask only when the sequence length changes.
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            self.src_mask = self._generate_square_subsequent_mask(len(src)).to(src.device)

        return super().forward(src, self.src_mask)


class LinearLayer(nn.Linear):
    """Vocabulary projection with zeroed bias and uniform weight init in [-initrange, initrange]."""

    def __init__(self, ninp, ntoken, initrange):
        super().__init__(ninp, ntoken)
        nn.init.zeros_(self.bias)
        nn.init.uniform_(self.weight, -initrange, initrange)


class TransformerLMSequntial(nn.Sequential):
    """A small GPT-2-style language model expressed as an nn.Sequential for
    compatibility with Pipe. (Misspelled class name is preserved for callers.)

    Layout: embedding -> positional encoding -> ndecoder decoder layers ->
    vocabulary projection.
    """

    def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
        decoders = [TransformerDecoderLayer(ninp, nhead, nhid, dropout) for _ in range(ndecoder)]
        super(TransformerLMSequntial, self).__init__(
            EmbeddingLayer(ntokens, ninp, initrange),
            PositionalEncodingLayer(ninp, dropout),
            *decoders,
            LinearLayer(ninp, ntokens, initrange),
        )
122

Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
123
124

def get_data(device):
    """Load WikiText-2 via (legacy) torchtext.

    Returns (ntokens, train_data, val_data, test_data) where each split is a
    batchified tensor already moved to `device`.
    """
    # The legacy torchtext Field API emits deprecation warnings; record them so
    # benchmark output stays clean.
    with warnings.catch_warnings(record=True):
        field = torchtext.data.Field(
            tokenize=get_tokenizer("basic_english"), init_token="<sos>", eos_token="<eos>", lower=True
        )
        train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(field)
        field.build_vocab(train_txt)
        ntokens = len(field.vocab.stoi)

        train_batch_size, eval_batch_size = 20, 10
        return (
            ntokens,
            batchify(train_txt, train_batch_size, field, device),
            batchify(val_txt, eval_batch_size, field, device),
            batchify(test_txt, eval_batch_size, field, device),
        )
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156


def batchify(data, bsz, TEXT, device):
    """Numericalize a torchtext dataset and lay it out as (nbatch, bsz) columns.

    Trailing tokens that do not fill a complete column are dropped; the result
    is moved to `device`.
    """
    tokens = TEXT.numericalize([data.examples[0].text])
    nbatch = tokens.size(0) // bsz
    trimmed = tokens.narrow(0, 0, nbatch * bsz)
    columns = trimmed.view(bsz, -1).t().contiguous()
    return columns.to(device)


def get_batch(source, i, bptt):
    """Return the (input, target) pair at row i: targets are inputs shifted by one token."""
    seq_len = min(bptt, len(source) - 1 - i)
    chunk = source[i : i + seq_len]
    shifted = source[i + 1 : i + 1 + seq_len].view(-1)
    return chunk, shifted


Tom Birch's avatar
Tom Birch committed
157
158
159
160
def make_model(args, device, ntokens):
    """Build the transformer LM plus its training companions.

    Returns (model, criterion, optimizer_factory, scaler). With
    --lazy-construction, `model` is a list of zero-arg layer factories instead
    of a constructed module, so Pipe can build each partition on its own device.
    """
    ninp = 2048  # embedding dimension
    nhid = 2048  # the dimension of the feedforward network model in nn.TransformerEncoder
    nhead = 32  # the number of heads in the multiheadattention models
    dropout = 0
    initrange = 0.1
    ndecoder = args.num_decoder_layers

    if args.lazy_construction:
        model = [
            lambda: EmbeddingLayer(ntokens, ninp, initrange),
            lambda: PositionalEncodingLayer(ninp, dropout),
        ]
        model.extend(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout) for _ in range(ndecoder))
        model.append(lambda: LinearLayer(ninp, ntokens, initrange))
    else:
        model = TransformerLMSequntial(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)

    criterion = nn.CrossEntropyLoss()
    lr = 0.01  # learning rate

    def make_adam(m):
        # Optimizer is created lazily so it binds to the parameters only after
        # Pipe has placed them on their final devices.
        return Adam(m.parameters(), lr=lr)

    return model, criterion, make_adam, GradScaler()
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
188
189


Tom Birch's avatar
Tom Birch committed
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
def get_tensors_by_size_bucket():
    """Count live CUDA tensors found by the GC, bucketed by (shape..., element_size)."""
    import gc
    from collections import defaultdict

    size_buckets = defaultdict(int)
    cuda_tensors = (
        obj for obj in gc.get_objects() if isinstance(obj, torch.Tensor) and obj.device.type == "cuda"
    )
    for tensor in cuda_tensors:
        size_buckets[tuple(tensor.size()) + (tensor.element_size(),)] += 1

    return size_buckets


def dump_size_buckets(size_buckets, prefix=""):
    """Print per-bucket and total byte counts from get_tensors_by_size_bucket().

    Each key is (*shape, element_size), so its product times the count is the
    bytes occupied by that bucket.
    """
    import operator
    from functools import reduce

    total = 0
    for bucket, count in size_buckets.items():
        nbytes = reduce(operator.mul, bucket) * count
        total += nbytes
        print(f"{prefix}{bucket} : {count}, {nbytes}")

    print(f"{prefix}total = {total}")


# Snapshot of the previous CUDA-tensor size histogram (see check_size_buckets).
last_size_buckets = None
# One-shot flag: check_size_buckets dumps its buckets only on the first diff pass.
once = True


def safe_rank():
    """Return torch.distributed.get_rank(), or 0 when no process group is initialized.

    Older torch raised AssertionError from get_rank() before initialization;
    newer versions raise RuntimeError/ValueError instead, so all three are
    treated as "not initialized".
    """
    try:
        return torch.distributed.get_rank()
    except (AssertionError, RuntimeError, ValueError):
        return 0


def check_size_buckets():
    """Diff the current CUDA-tensor histogram against the previous call and dump changes.

    Intended as a leak detector: call once per iteration and watch for buckets
    that keep growing. Dumps the full histograms once regardless of diffs.
    """
    global last_size_buckets
    global once
    size_buckets = get_tensors_by_size_bucket()
    if last_size_buckets is not None:
        if size_buckets != last_size_buckets:
            # BUG FIX: was `{safe-rank()}` inside the f-string, which evaluates
            # `safe - rank()` and raises NameError.
            print(f"difference is oustanding tensors: {safe_rank()}")
            dump_size_buckets(last_size_buckets, "old: ")
            dump_size_buckets(size_buckets, "new: ")
        if once:
            print(f"dumping buckets for: {safe_rank()}")
            dump_size_buckets(last_size_buckets, "old: ")
            dump_size_buckets(size_buckets, "new: ")
            once = False
    else:
        print(f"size buckets none on {safe_rank()}")
    last_size_buckets = size_buckets


def dump_cuda_tensors():
    """Print a histogram of live CUDA tensors (bucketed by shape and element
    size), their total footprint, and torch's CUDA allocator stats."""
    import gc
    import operator
    import pprint
    from collections import defaultdict
    from functools import reduce

    print(f"dumping cuda tensors...")
    # BUG FIX: size_buckets was never initialized, so any live CUDA tensor
    # raised NameError here.
    size_buckets = defaultdict(int)
    for obj in gc.get_objects():
        if not isinstance(obj, torch.Tensor):
            continue
        if obj.device.type == "cuda":
            size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1

    print(f"outstanding cuda tensors:")
    total = 0
    for key, value in size_buckets.items():
        this = reduce(operator.mul, key) * value
        total += this
        print(f"{key} : {value}, {this}")
    print(f"total size = {total}")

    pprint.pprint(torch.cuda.memory_stats())


def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
    """Run one training pass over lm_dataloader, logging loss and wps per batch.

    `optimizer` is a factory (model -> optimizer instance). `model` is expected
    to be a Pipe-like module exposing .group, .devices and .back_helper; only
    the last pipeline stage computes the loss, earlier stages just propagate
    gradients via back_helper.
    """
    import operator
    from functools import reduce

    model.train()

    num_params = reduce(operator.add, (reduce(operator.mul, p.size()) for p in model.parameters()))
    if model.group:
        print(f"training model, #prams = {num_params}, group: {model.group.rank()}, sizes {model.group.size()}")
    else:
        print(f"training model, #prams = {num_params}")
    vocab_size = 10000  # FIXME
    total_loss = 0.0
    start_time = time.time()
    word_counter = 0

    optimizer = optimizer(model)

    def get_first_device(model):
        return model.devices[0] if model.devices else torch.cuda.current_device()

    def get_last_device(model):
        return model.devices[-1] if model.devices else torch.cuda.current_device()

    for i, batch in enumerate(lm_dataloader):
        if args.max_batch and i > args.max_batch:
            break
        optimizer.zero_grad()
        output = model(batch["input"].to(get_first_device(model)))

        is_last_stage = model.group is None or model.group.rank() == model.group.size() - 1
        if is_last_stage:
            # Only the final stage holds the logits; compute and backprop here.
            target = batch["target"].to(get_last_device(model))
            output = output.to(target.device)
            loss = criterion(output.view(-1, vocab_size), target.view(-1))
            loss.backward()
        else:
            # Intermediate stages join the backward pass through the Pipe helper.
            model.back_helper(output)

        del output

        torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
        optimizer.step()

        if is_last_stage:
            total_loss += loss.item()
            log_interval = 1
            word_counter += batch["ntokens"]
            if i % log_interval == 0 and i > 0:
                cur_loss = total_loss / log_interval
                elapsed = time.time() - start_time
                print(
                    "| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
                        i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
                    )
                )
                word_counter = 0
                total_loss = 0
                start_time = time.time()
        # if i >= 10:
        #    break
        # torch.cuda.empty_cache()
        # check_size_buckets()
Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
339
340
341
342
343
344
345
346
347


def evaluate(eval_model, data_source, criterion, bptt, ntokens):
    """Compute the average per-token loss of eval_model over data_source (no grad)."""
    eval_model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for start in range(0, data_source.size(0) - 1, bptt):
            data, targets = get_batch(data_source, start, bptt)
            output = eval_model(data).to(targets.device)
            flat = output.view(-1, ntokens)
            # Weight each chunk's mean loss by its length so the total is per-token.
            total_loss += len(data) * criterion(flat, targets).item()
    return total_loss / (len(data_source) - 1)


def get_number_of_words(data):
    """Total token count of a (seq_len, batch) batchified tensor."""
    seq_len, batch = data.size()[0], data.size()[1]
    return seq_len * batch


Tom Birch's avatar
Tom Birch committed
358
def benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):
    """Train for one epoch on WikiText-2-style data and report words/sec; on the
    4-partition golden setup, assert throughput and per-GPU peak memory stay
    within known bounds."""
    epoch = 1
    bptt = 35
    start_time = time.time()

    print("-" * 110)
    print("| start of epoch {:1d}".format(epoch))
    print("-" * 110)
    epoch_start_time = time.time()
    # BUG FIX: train() takes (dataloader, model, criterion, optimizer,
    # vocab_size, args); the old call passed both bptt and ntokens (7 positional
    # args) and raised TypeError.
    train(train_data, model, criterion, optimizer, ntokens, args)
    val_loss = 1  # evaluate(model, val_data, criterion, bptt, ntokens)
    print("-" * 89)
    print(
        "| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} ".format(
            epoch, (time.time() - epoch_start_time), val_loss
        )
    )
    print("-" * 110)

    elapsed_time = time.time() - start_time
    nwords = get_number_of_words(train_data) + get_number_of_words(val_data)
    wps = nwords / elapsed_time

    test_loss = 1  # evaluate(model, test_data, criterion, bptt, ntokens)
    print("=" * 89)
    print(
        "| end of training | test loss {:5.2f} \n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}".format(
            test_loss, elapsed_time, nwords, wps
        )
    )
    print("=" * 110)

    if can_benchmark and len(model.balance) == 4:
        # Assert that words per second is within 3 standard deviations of the average
        # of six golden runs
        assert wps > 36954.4 - (3 * 116.825)

        print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]))
        print("Peak allocated bytes on cuda:1: {:1d}".format(torch.cuda.memory_stats(1)["allocated_bytes.all.peak"]))
        print("Peak allocated bytes on cuda:2: {:1d}".format(torch.cuda.memory_stats(2)["allocated_bytes.all.peak"]))
        print("Peak allocated bytes on cuda:3: {:1d}".format(torch.cuda.memory_stats(3)["allocated_bytes.all.peak"]))

        # Assert that memory usage on each GPU is within 10% of golden run
        # Right-hand-side is golden run bytes * 110%
        assert torch.cuda.memory_stats(0)["allocated_bytes.all.peak"] < 4061909504 * 1.1
        assert torch.cuda.memory_stats(1)["allocated_bytes.all.peak"] < 4050944 * 1.1
        assert torch.cuda.memory_stats(2)["allocated_bytes.all.peak"] < 10427392 * 1.1
        assert torch.cuda.memory_stats(3)["allocated_bytes.all.peak"] < 2031824896 * 1.1
        print("No regression detected")


def generate_balance(num_devices, num_layers):
    """Split num_layers across num_devices as evenly as possible.

    Each device receives ceil(remaining / devices_left) layers, so earlier
    devices absorb any remainder, e.g. (3, 8) -> [3, 3, 2].
    """
    balance = []
    remaining = num_layers
    for devices_left in range(num_devices, 0, -1):
        share = math.ceil(remaining / devices_left)
        balance.append(share)
        remaining -= share
    return balance

Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
422

Tom Birch's avatar
Tom Birch committed
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
def make_model_and_data(args, device, new_data: bool = True):
    """Build the model plus its data pipeline.

    With new_data=True, "data" is a DataLoader over a synthetic LM dataset and
    "vocab_size" is included; otherwise "data" is the raw WikiText-2 splits
    tuple. NOTE(review): the `device` argument is ignored — CUDA is assumed
    unconditionally; confirm before calling on a CPU-only host.
    """
    device = torch.device("cuda")
    if new_data:
        vocab_size = 10000
        model, criterion, optimizer, scaler = make_model(args, device, vocab_size)
        lm_dataloader = DataLoader(
            BenchmarkLMDataset(),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=0,
            collate_fn=collate_sentences_lm,
        )
        return {
            "model": model,
            "criterion": criterion,
            "optimizer": optimizer,
            "data": lm_dataloader,
            "vocab_size": vocab_size,
        }

    data = get_data(device)
    ntokens, train_data, val_data, test_data = data
    model, criterion, optimizer, scaler = make_model(args, device, ntokens)
    return {
        "model": model,
        "criterion": criterion,
        "optimizer": optimizer,
        "data": data,
    }


def bench_single_process(args):
    """Benchmark Pipe in a single process, partitioning the model across local GPUs."""
    num_devices = torch.cuda.device_count()
    assert num_devices > 0
    init_random_seed(0)

    new_data = True

    blob = make_model_and_data(args, None, new_data=new_data)
    model = blob["model"]

    # Spread the model's layers over at most 8 devices.
    balance = generate_balance(min(num_devices, 8), len(model))
    p = pipe.Pipe(
        model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint
    )
    # Drop references to the unwrapped model so its memory can be reclaimed.
    del model
    del blob["model"]

    if new_data:
        train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
    else:
        ntokens, train_data, val_data, test_data = blob["data"]
        # BUG FIX: `criterion` and `optimizer` were referenced as bare names
        # here (NameError); they live in the blob returned by make_model_and_data.
        benchmark_language_model(
            train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args
        )


def run_mp_worker(args, available_workers):
    """Worker body for the multiprocess/RPC benchmarks: build the model, wrap it
    in a multi-process Pipe, and train."""
    new_data = True

    blob = make_model_and_data(args, None, new_data=new_data)
    model = blob["model"]

    # Spread the model's layers over at most 8 pipeline workers.
    balance = generate_balance(min(available_workers, 8), len(model))
    p = pipe.Pipe(
        model,
        balance,
        style=Pipe.MultiProcess,
        chunks=args.chunks,
        worker_map=get_worker_map(),
        input_device=torch.cuda.current_device(),
        pipelined_backward=args.pipelined_backward,
        checkpoint=args.checkpoint,
    ).cuda()

    if args.all_at_once and p.pipeline:
        print(f"running all at once")
        p.pipeline.all_at_once = True

    if new_data:
        train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
    else:
        ntokens, train_data, val_data, test_data = blob["data"]
        # BUG FIX: `criterion` and `optimizer` were undefined names here; use
        # the instances stored in the blob.
        benchmark_language_model(
            train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args
        )


def run_worker(rank, world_size, args):
    if args.world_size != 0:
        world_size = args.world_size
    dist_init(rank + args.rank_base, world_size, hostname=args.host)
    initialize_model_parallel(1, world_size)
    init_random_seed(0)
    run_mp_worker(args, world_size)

    rpc.shutdown()
    torch.distributed.destroy_process_group()


def bench_multi_process(args, all_at_once=False):
    """Spawn local worker processes (local_world_size, or up to 2 GPUs by default)
    and run the multiprocess benchmark in each."""
    world_size = args.local_world_size if args.local_world_size != 0 else min(torch.cuda.device_count(), 2)
    mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)


# Preferred InfiniBand NIC per local rank: ranks are paired, two per mlx5
# device (ranks 0-1 -> mlx5_0, 2-3 -> mlx5_1, ...), each on port 1.
best_device_map = {rank: f"mlx5_{rank // 2}:1" for rank in range(8)}


def bench_mpi(args):
    """Benchmark under mpirun: one rank per process, stages connected via RPC.

    Reads OMPI_COMM_WORLD_RANK (set by Open MPI) to choose a NIC before the
    process group exists, then bootstraps torch.distributed with the MPI
    backend and a TCP-bootstrapped RPC layer.
    """
    guess_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
    # Pin UCX to this rank's preferred NIC before init_process_group runs.
    os.environ["UCX_NET_DEVICES"] = best_device_map[guess_rank]

    torch.distributed.init_process_group(backend="mpi")
    os.environ["MASTER_ADDR"] = args.host
    os.environ["MASTER_PORT"] = "10639"
    if args.socket_name:
        os.environ["GLOO_SOCKET_IFNAME"] = args.socket_name
        os.environ["TP_SOCKET_IFNAME"] = args.socket_name
    init_method = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
    rank = torch.distributed.get_rank()
    world_size = torch.distributed.get_world_size()
    # Round-robin ranks onto the visible GPUs.
    torch.cuda.set_device(rank % torch.cuda.device_count())

    rpc.init_rpc(
        f"Test{rank}",
        rank=rank,
        world_size=world_size,
        backend=rpc.BackendType.PROCESS_GROUP,
        rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),
    )

    initialize_model_parallel(1, world_size)
    init_random_seed(0)

    run_mp_worker(args, world_size)

    rpc.shutdown()
    torch.distributed.destroy_process_group()


# Command-line interface shared by all benchmark modes (single-process,
# spawned multiprocess, and MPI).
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--local-world-size", "-l", type=int, default=0, help="local world size")
parser.add_argument("--world-size", "-w", type=int, default=0, help="world size")
parser.add_argument("--rank-base", "-r", type=int, help="rank base", default=0)
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--no-mpi", action="store_true", default=False, help="disable mpi")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--all-at-once", action="store_true", default=False, help="do backward pass on whole batch at once")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--socket-name", type=str, default=None, help="socket ifname for gloo/tp")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument(
    # BUG FIX: help text was a copy-paste of --num-decoder-layers' help.
    "--lazy-construction", action="store_true", default=False, help="Construct the model lazily, layer by layer"
)
parser.add_argument(
    "--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument(
    "--pipelined-backward", dest="pipelined_backward", action="store_true", help="Pipelined backward pass"
)
parser.add_argument(
    "--no-pipelined-backward", dest="pipelined_backward", action="store_false", help="Pipelined backward pass"
)
parser.set_defaults(pipelined_backward=True)

if __name__ == "__main__":
    args = parser.parse_args()
    # bench_multi_process(args, all_at_once=True)
    # Single-process mode when MPI is not driving us (or was explicitly
    # disabled); under mpirun, every rank enters bench_mpi and only rank 0
    # logs the arguments.
    if args.no_mpi or "OMPI_COMM_WORLD_RANK" not in os.environ:
        print(f"Running benchmark with args: {args}")
        bench_single_process(args)
    else:
        if os.environ["OMPI_COMM_WORLD_RANK"] == "0":
            print(f"Running benchmark with args: {args}")
        bench_mpi(args)