# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn

from fairscale.optim import GradScaler


class Offload_Transformer:
    """Model/benchmark configs and golden stats for the transformer offload benchmark."""

    @staticmethod
    def get_model_config():
        return {
            "vocab_size": 10000,
            "ninp": 2048,  # embedding dimension
            "nhid": 2048,  # the dimension of the feedforward network model in nn.TransformerEncoder
            "nhead": 32,  # the number of heads in the multiheadattention models
            "dropout": 0,
            "initrange": 0.1,
            "scaler": GradScaler(),
            "clip_value": 0.05,
            "num_decoder_layers": 10,
            "seq_len": 32,
        }

    @staticmethod
    def get_benchmark_config(checkpoint_activation=True):
        return {
            "epochs": 1,
            "lr": 0.001,  # learning rate
            "batch_size": 8,
            "criterion": nn.CrossEntropyLoss(),
            "checkpoint_activation": checkpoint_activation,
            "num_microbatches": 1,
            "slices": 3,
        }

    @staticmethod
    def get_golden_real_stats():
        return {
            "avg_wps": 192.105,
            "std_dev_wps": 39.56,
            "peak_mem_usage": 1180848128,
        }
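

# Hedged usage sketch (assumption, not part of the original file): one way a
# benchmark harness might compare a measured run against the golden stats
# above. The helper name `verify_transformer_run` and the 3-sigma tolerance
# on words-per-second are assumptions, not fairscale API.
def verify_transformer_run(measured_wps, measured_peak_mem):
    golden = Offload_Transformer.get_golden_real_stats()
    # Words-per-second must fall within three standard deviations of the mean.
    wps_ok = abs(measured_wps - golden["avg_wps"]) <= 3.0 * golden["std_dev_wps"]
    # Peak memory must not exceed the recorded golden value (in bytes).
    mem_ok = measured_peak_mem <= golden["peak_mem_usage"]
    return wps_ok and mem_ok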


class Offload_Sequential:
    """Model/benchmark configs for the feedforward (nn.Sequential) offload benchmark."""

    @staticmethod
    def get_model_config():
        return {
            "inputs": 100,
            "outputs": 5,
            "hidden": 1000,
            "layers": 100,
            "clip_value": 0.05,
        }

    @staticmethod
    def get_benchmark_config():
        return {
            "epochs": 1,
            "lr": 0.001,  # learning rate
            "batch_size": 8,
            "criterion": nn.CrossEntropyLoss(),
            "slices": 3,
            "checkpoint_activation": True,
            "num_microbatches": 1,
        }
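

# Hedged sketch (assumption, not part of the original file): the feedforward
# model that Offload_Sequential.get_model_config() describes, built as a plain
# nn.Sequential. The builder name `build_sequential_model` is hypothetical.
def build_sequential_model(config):
    layers = [nn.Linear(config["inputs"], config["hidden"]), nn.ReLU()]
    # Hidden blocks; the input and output Linear layers account for two of
    # config["layers"] total layers.
    for _ in range(config["layers"] - 2):
        layers += [nn.Linear(config["hidden"], config["hidden"]), nn.ReLU()]
    layers.append(nn.Linear(config["hidden"], config["outputs"]))
    return nn.Sequential(*layers)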


class Pipe:
    """Model/benchmark configs and golden stats for the pipeline-parallel benchmark."""

    @staticmethod
    def get_model_config():
        return {
            "vocab_size": 10000,
            "ninp": 2048,  # embedding dimension
            "nhid": 2048,  # the dimension of the feedforward network model in nn.TransformerEncoder
            "nhead": 32,  # the number of heads in the multiheadattention models
            "dropout": 0,
            "initrange": 0.1,
            "scaler": GradScaler(),
            "clip_value": 0.05,
            "num_decoder_layers": 10,
            "seq_len": 32,
        }

    @staticmethod
    def get_benchmark_config():
        return {
            "epochs": 1,
            "lr": 0.001,  # learning rate
            "batch_size": 8,
            "criterion": nn.CrossEntropyLoss(),
        }

    @staticmethod
    def get_golden_real_stats():
        return {
            "avg_wps": 703.778,
            "std_dev_wps": 5.732,
            "peak_mem_usage": [2320996352, 1396742144, 1396742144, 2340010496],
        }

    @staticmethod
    def get_golden_synthetic_stats():
        # TODO(anj-s): Add support for synthetic regression benchmarks
        raise NotImplementedError("Synthetic data benchmarks are not supported.")
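

# Hedged usage sketch (assumption, not part of the original file): how the
# "scaler" and "clip_value" entries in the model configs above are typically
# consumed in a mixed-precision training step. `training_step` is a
# hypothetical helper; the GradScaler calls follow the standard
# scale / unscale_ / step / update pattern.
def training_step(model, optimizer, loss, config):
    scaler = config["scaler"]
    # Scale the loss to avoid fp16 gradient underflow, then backprop.
    scaler.scale(loss).backward()
    # Unscale before clipping so the threshold applies to the true gradients.
    scaler.unscale_(optimizer)
    nn.utils.clip_grad_value_(model.parameters(), config["clip_value"])
    scaler.step(optimizer)
    scaler.update()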