# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
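
"""Golden model and benchmark configurations for the WikiText-2 language-modeling
benchmarks: offloaded transformer, offloaded sequential model, and pipeline-parallel
transformer."""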

import torch.nn as nn

from fairscale.optim import GradScaler


class Offload_Transformer:
    def get_model_config():
        return {
            "vocab_size": 10000,
            "ninp": 2048,  # embedding dimension
            "nhid": 2048,  # the dimension of the feedforward network model in nn.TransformerEncoder
            "nhead": 32,  # the number of heads in the multiheadattention models
            "dropout": 0,
            "initrange": 0.1,
            "scaler": GradScaler(),
            "clip_value": 0.05,
            "num_decoder_layers": 10,
            "seq_len": 32,
        }

    def get_benchmark_config():

        return {
            "epochs": 1,
            "lr": 0.001,  # learning rate
            "batch_size": 8,
            "criterion": nn.CrossEntropyLoss(),
            "checkpoint_activation": True,
31
            "num_microbatches": 1,
            "slices": 3,
        }
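

# Illustrative sketch only (an assumption, not the benchmark's actual training loop):
# one way the "scaler", "clip_value", and "criterion" entries above are typically
# consumed in a mixed-precision training step.
def _example_mixed_precision_step(model, optimizer, data, target, model_cfg, bench_cfg):
    scaler = model_cfg["scaler"]
    output = model(data)
    loss = bench_cfg["criterion"](output.view(-1, model_cfg["vocab_size"]), target.view(-1))
    scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
    scaler.unscale_(optimizer)  # unscale before clipping the raw gradients
    nn.utils.clip_grad_value_(model.parameters(), model_cfg["clip_value"])
    scaler.step(optimizer)  # optimizer step, skipped if inf/nan gradients are found
    scaler.update()
    optimizer.zero_grad()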


class Offload_Sequential:
    def get_model_config():
        return {
            "inputs": 100,
            "outputs": 5,
            "hidden": 1000,
            "layers": 100,
            "clip_value": 0.05,
        }

    def get_benchmark_config():

        return {
            "epochs": 1,
            "lr": 0.001,  # learning rate
            "batch_size": 8,
            "criterion": nn.CrossEntropyLoss(),
            "slices": 3,
            "checkpoint_activation": True,
            "num_microbatches": 4,
        }
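

# A minimal sketch (an assumption, not fairscale's OffloadModel wiring) of how the
# Offload_Sequential model config above could be materialized as a plain nn.Sequential;
# how the "layers" count maps onto Linear modules here is also an assumption.
def _build_sequential_from_config(cfg):
    layers = [nn.Linear(cfg["inputs"], cfg["hidden"]), nn.ReLU()]
    for _ in range(cfg["layers"] - 2):
        layers += [nn.Linear(cfg["hidden"], cfg["hidden"]), nn.ReLU()]
    layers.append(nn.Linear(cfg["hidden"], cfg["outputs"]))
    return nn.Sequential(*layers)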


class Pipe:
    def get_model_config():
        return {
            "vocab_size": 10000,
            "ninp": 2048,  # embedding dimension
            "nhid": 2048,  # the dimension of the feedforward network model in nn.TransformerEncoder
            "nhead": 32,  # the number of heads in the multiheadattention models
            "dropout": 0,
            "initrange": 0.1,
            "scaler": GradScaler(),
            "clip_value": 0.05,
            "num_decoder_layers": 10,
            "seq_len": 32,
        }

    def get_benchmark_config():

        return {
            "epochs": 1,
            "lr": 0.001,  # learning rate
            "batch_size": 8,
            "criterion": nn.CrossEntropyLoss(),
        }

    def get_golden_real_stats(multiprocess=False):
        if not multiprocess:
            return {
                "avg_wps": 703.778,
                "std_dev_wps": 5.732,
                "peak_mem_usage": [2320996352, 1396742144, 1396742144, 2340010496],
            }
        else:
            return {
                "avg_wps": 647.404,
                "std_dev_wps": 14.51,
                "peak_mem_usage": [3305007616, 2578692608, 3304524288, 2578692608],
            }

    def get_golden_synthetic_stats():
        # TODO(anj-s): Add support for synthetic regression benchmarks
        raise NotImplementedError("Synthetic data benchmarks are not supported.")
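

if __name__ == "__main__":
    # Illustrative usage only; how the golden numbers are consumed by the benchmark
    # harness is an assumption. A measured words-per-second figure would typically be
    # compared against the golden mean within a tolerance based on the recorded
    # standard deviation.
    golden = Pipe.get_golden_real_stats(multiprocess=False)
    measured_wps = golden["avg_wps"]  # placeholder measurement for this sketch
    tolerance = 3 * golden["std_dev_wps"]
    assert abs(measured_wps - golden["avg_wps"]) <= tolerance
    print("Pipe benchmark config:", Pipe.get_benchmark_config())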