import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import packaging.version
import pkg_resources
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info

from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)


logger = logging.getLogger(__name__)


def require_min_ver(pkg, min_ver):
    got_ver = pkg_resources.get_distribution(pkg).version
    if packaging.version.parse(got_ver) < packaging.version.parse(min_ver):
        logger.warning(
            f"{pkg}>={min_ver} is required for a normal functioning of this module, but found {pkg}=={got_ver}. "
            "Try: pip install -r examples/requirements.txt"
        )


require_min_ver("pytorch_lightning", "1.0.4")


MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
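# Note: a scheduler registered in arg_to_scheduler (and imported above) automatically becomes
# selectable via --lr_scheduler, since the argparse choices/metavar are derived from this dict.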


class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

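        # Copy any dropout/layerdrop values given on the command line onto the model config;
        # the config must already define the corresponding attribute.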
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
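        """Replace self.model with weights loaded from another Hugging Face checkpoint."""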
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        # Step the scheduler every training step rather than every epoch.
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
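        # Exclude biases and LayerNorm weights from weight decay.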
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, mode):
        if mode == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

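    # Location of the cached features for a given split, keyed by model name and max sequence length.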
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

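    # On checkpoint save, also export the model and tokenizer in Hugging Face format to
    # output_dir/best_tfmr (rank zero only), recording the current step in config.save_step.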
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default="",
            type=str,
            help="Where do you want to store the pre-trained models downloaded from s3",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")


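# Logs the per-group learning rate after every batch and prints validation/test metrics on rank zero;
# test metrics are additionally written to output_dir/test_results.txt.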
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


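# Arguments shared by every task. Several map directly onto pl.Trainer arguments via dest=
# (e.g. --gradient_accumulation_steps -> accumulate_grad_batches, --max_grad_norm -> gradient_clip_val).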
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )

    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


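# Build a pl.Trainer from the parsed args and optionally run training. A val_loss-monitoring
# ModelCheckpoint and a LoggingCallback are created unless callbacks are passed in explicitly.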
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks = extra_callbacks + [early_stopping_callback]  # avoid mutating the default list
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    # TODO: remove with PyTorch 1.6 since pl uses native amp
    if args.fp16:
        train_params["precision"] = 16
        train_params["amp_level"] = args.fp16_opt_level

    if args.gpus > 1:
        train_params["distributed_backend"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks,
        logger=logger,
        checkpoint_callback=checkpoint_callback,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    return trainer
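

# Typical usage from a task-specific script (sketch; names like MyTaskTransformer are illustrative,
# the concrete subclass must implement get_dataloader and the validation hooks):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = MyTaskTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskTransformer(args)
#   trainer = generic_train(model, args)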