"test/vscode:/vscode.git/clone" did not exist on "241488570d3d58312ed628edd84101a5074aaa42"
transformer_base.py 9.89 KB
Newer Older
import argparse
import logging
import os
import random

import numpy as np
import pytorch_lightning as pl
import torch

from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    get_linear_schedule_with_warmup,
)


logger = logging.getLogger(__name__)


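# Map each task "mode" to the corresponding transformers Auto* model class.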
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
}


def set_seed(args: argparse.Namespace):
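    "Seed python, numpy and torch (incl. CUDA) RNGs for reproducibility."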
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", **config_kwargs):
        "Initialize a model."

        super().__init__()
        self.hparams = hparams
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        self.config = AutoConfig.from_pretrained(
            self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
            **({"num_labels": num_labels} if num_labels is not None else {}),
            cache_dir=cache_dir,
            **config_kwargs,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
            cache_dir=cache_dir,
        )
        self.model = MODEL_MODES[mode].from_pretrained(
            self.hparams.model_name_or_path,
            from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
            config=self.config,
            cache_dir=cache_dir,
        )

    def is_logger(self):
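        "Return True only on the main process (proc_rank <= 0)."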
        return self.trainer.proc_rank <= 0

    def configure_optimizers(self):
        "Prepare optimizer and schedule (linear warmup and decay)"

        model = self.model
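        # No weight decay for biases and LayerNorm weights.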
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        return [optimizer]

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
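        # On TPU the step must go through torch_xla's xm.optimizer_step;
        # xm is imported lazily in generic_train() when --n_tpu_cores > 0.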
        if self.trainer.use_tpu:
            xm.optimizer_step(optimizer)
        else:
            optimizer.step()
        optimizer.zero_grad()
        self.lr_scheduler.step()

    def get_tqdm_dict(self):
        avg_loss = getattr(self.trainer, "avg_loss", 0.0)
        tqdm_dict = {"loss": "{:.3f}".format(avg_loss), "lr": self.lr_scheduler.get_last_lr()[-1]}
        return tqdm_dict

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_end(self, outputs):
        return self.validation_end(outputs)

    def train_dataloader(self):
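        # Build the training dataloader and derive the total number of optimizer
        # updates (t_total) needed by the linear warmup/decay schedule.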
        train_batch_size = self.hparams.train_batch_size
        dataloader = self.load_dataset("train", train_batch_size)

        t_total = (
            (len(dataloader.dataset) // (train_batch_size * max(1, self.hparams.n_gpu)))
            // self.hparams.gradient_accumulation_steps
            * float(self.hparams.num_train_epochs)
        )
        scheduler = get_linear_schedule_with_warmup(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total
        )
        self.lr_scheduler = scheduler
        return dataloader

    def val_dataloader(self):
        return self.load_dataset("dev", self.hparams.eval_batch_size)

    def test_dataloader(self):
        return self.load_dataset("test", self.hparams.eval_batch_size)

    def _feature_file(self, mode):
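        "Path of the cached features file for this split, keyed by model name and max sequence length."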
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default="",
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default="",
            type=str,
            help="Where do you want to store the pre-trained models downloaded from s3",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument(
            "--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform."
        )

        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)


class LoggingCallback(pl.Callback):
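    "Log metrics at the end of validation and test, and write test metrics to a file."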
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        logger.info("***** Validation results *****")
        if pl_module.is_logger():
            metrics = trainer.callback_metrics
            # Log results
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    logger.info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        logger.info("***** Test results *****")

        if pl_module.is_logger():
            metrics = trainer.callback_metrics

            # Log and save results to file
            output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
            with open(output_test_results_file, "w") as writer:
                for key in sorted(metrics):
                    if key not in ["log", "progress_bar"]:
                        logger.info("{} = {}\n".format(key, str(metrics[key])))
                        writer.write("{} = {}\n".format(key, str(metrics[key])))


def add_generic_args(parser, root_dir):
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )

    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )

    parser.add_argument("--n_gpu", type=int, default=1)
    parser.add_argument("--n_tpu_cores", type=int, default=0)
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )

    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")


def generic_train(model: BaseTransformer, args: argparse.Namespace):
    # seed RNGs and make sure we don't clobber an existing output directory
    set_seed(args)

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))

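    # Keep the 5 best checkpoints ranked by validation loss.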
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=5
    )

    train_params = dict(
        accumulate_grad_batches=args.gradient_accumulation_steps,
        gpus=args.n_gpu,
        max_epochs=args.num_train_epochs,
        early_stop_callback=False,
        gradient_clip_val=args.max_grad_norm,
        checkpoint_callback=checkpoint_callback,
        callbacks=[LoggingCallback()],
    )

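    # 16-bit training through pytorch-lightning's apex-backed AMP flags.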
    if args.fp16:
        train_params["use_amp"] = args.fp16
        train_params["amp_level"] = args.fp16_opt_level

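    # TPU training: import torch_xla lazily and hand the core count to the Trainer.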
    if args.n_tpu_cores > 0:
        global xm
        import torch_xla.core.xla_model as xm

        train_params["num_tpu_cores"] = args.n_tpu_cores
        train_params["gpus"] = 0

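    # Multiple GPUs use the DistributedDataParallel backend.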
    if args.n_gpu > 1:
        train_params["distributed_backend"] = "ddp"

    trainer = pl.Trainer(**train_params)

    if args.do_train:
        trainer.fit(model)

    return trainer