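"""Fine-tune a seq2seq model (e.g. BART or T5) for summarization or translation
with PyTorch Lightning. Validation reports ROUGE (SummarizationModule) or BLEU
(TranslationModule); checkpointing monitors the module's val_metric."""
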
import argparse
import glob
import logging
import os
import time
import warnings
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader

from lightning_base import BaseTransformer, add_generic_args, generic_train
from transformers import get_linear_schedule_with_warmup


try:
    from .utils import (
        use_task_specific_params,
        SummarizationDataset,
        lmap,
        flatten_list,
        pickle_save,
        save_git_info,
        save_json,
        freeze_params,
        calculate_rouge,
        get_git_info,
        ROUGE_KEYS,
        calculate_bleu_score,
    )
    from .callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback
except ImportError:
    from utils import (
        use_task_specific_params,
        SummarizationDataset,
        lmap,
        flatten_list,
        pickle_save,
        save_git_info,
        save_json,
        freeze_params,
        calculate_rouge,
        get_git_info,
        ROUGE_KEYS,
        calculate_bleu_score,
    )
    from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback

logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    val_metric = "rouge2"
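    # main() passes val_metric to get_checkpoint_callback, so it decides which checkpoint is "best".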

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
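        # Apply any config.task_specific_params["summarization"] overrides (e.g. generation defaults).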
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)

        self.dataset_kwargs: dict = dict(
            data_dir=self.hparams.data_dir,
            max_source_length=self.hparams.max_source_length,
            prefix=self.model.config.prefix or "",
        )
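
        # -1 (the CLI default) becomes None, meaning "use every example in the split".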
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
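        # Eval splits are allowed a larger generation budget than training, never a smaller one.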
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"

        if self.hparams.freeze_embeds:
            self.freeze_embeds()
        if self.hparams.freeze_encoder:
            freeze_params(self.model.model.encoder)  # TODO: this will break for t5
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers

    def freeze_embeds(self):
        """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
        try:
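            # BART-style models nest encoder/decoder under self.model.model and use learned positional embeddings.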
            freeze_params(self.model.model.shared)
            for d in [self.model.model.encoder, self.model.model.decoder]:
                freeze_params(d.embed_positions)
                freeze_params(d.embed_tokens)
        except AttributeError:
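            # T5-style models expose shared/encoder/decoder directly and have no embed_positions to freeze.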
            freeze_params(self.model.shared)
            for d in [self.model.encoder, self.model.decoder]:
                freeze_params(d.embed_tokens)

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[List[int]]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        source_ids, source_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
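        # Shift right for teacher forcing: the decoder sees y[:, :-1] and must predict y[:, 1:].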
        y_ids = y[:, :-1].contiguous()
        lm_labels = y[:, 1:].clone()
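        # -100 is PyTorch cross-entropy's default ignore_index, so padded positions add no loss.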
        lm_labels[y[:, 1:] == pad_token_id] = -100
        outputs = self(source_ids, attention_mask=source_mask, decoder_input_ids=y_ids, labels=lm_labels,)
        loss = outputs[0]
        return (loss,)

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        rouges = {k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "summ_len"]}
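        # Cast the monitored metric to a tensor with the loss's device/dtype for logging callbacks.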
        rouge_tensor: torch.FloatTensor = torch.tensor(rouges[self.val_metric]).type_as(loss)
        rouges.update({k: v.item() for k, v in losses.items()})
        losses.update(rouges)
        metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        metrics["step_count"] = self.step_count
        self.save_metrics(metrics, prefix)  # writes to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": rouge_tensor}

    def save_metrics(self, latest_metrics, type_path) -> None:
        self.metrics[type_path].append(latest_metrics)
        save_json(self.metrics, self.metrics_save_path)

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        pad_token_id = self.tokenizer.pad_token_id
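        # Trim columns that are all padding so generate() sees the shortest possible batch.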
        source_ids, source_mask, y = SummarizationDataset.trim_seq2seq_batch(batch, pad_token_id)
        t0 = time.time()
        generated_ids = self.model.generate(input_ids=source_ids, attention_mask=source_mask, use_cache=True,)
        gen_time = (time.time() - t0) / source_ids.shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(y)
        loss_tensors = self._step(batch)
        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, summ_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SummarizationDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = SummarizationDataset(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        sampler = None
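        # The sortish sampler batches similar-length examples together to cut padding; it
        # replaces shuffling and (per the assert below) only supports single-GPU training.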
        if self.hparams.sortish_sampler and type_path == "train":
            assert self.hparams.gpus <= 1  # TODO: assert earlier
            sampler = dataset.make_sortish_sampler(batch_size)
            shuffle = False

        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=dataset.collate_fn,
            shuffle=shuffle,
            num_workers=self.num_workers,
            sampler=sampler,
        )
        return dataloader

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
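        # Total optimizer steps: per-device batches per epoch, divided by gradient accumulation,
        # times epochs; the linear schedule decays the learning rate to zero over this many steps.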
        t_total = (
            (len(dataloader.dataset) // (self.hparams.train_batch_size * max(1, self.hparams.gpus)))
            // self.hparams.gradient_accumulation_steps
            * float(self.hparams.num_train_epochs)
        )
        scheduler = get_linear_schedule_with_warmup(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total
        )
        if max(scheduler.get_last_lr()) == 0:
            warnings.warn("All learning rates are 0")
        self.lr_scheduler = scheduler
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help="The maximum total target sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,  # these defaults are optimized for CNNDM. For xsum, see README.md.
            type=int,
            help="The maximum total target sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help="The maximum total target sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--data_dir",
            type=str,
            required=True,
            help="The input data dir. Should contain train.source, train.target, val.source, val.target, test.source, test.target",
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--logger", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task to fine-tune: summarization or translation."
        )
        return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    val_metric = "bleu"

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu_score(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
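    # The run itself writes a handful of bookkeeping files (git info, hparams.pkl, metrics.json),
    # hence the small allowance before treating the directory as a stale previous run.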
    if len(os.listdir(args.output_dir)) > 3 and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if model is None:
        if args.task == "summarization":
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    if (
        args.logger == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name)

    elif args.logger == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name)
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric),
        logger=logger,
        # TODO: early stopping callback seems messed up
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
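    # Test (and resume) from the last checkpoint by sorted filename, if any were saved.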
    checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True)))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    trainer.test(model)  # this breaks in DDP, known lightning issue. See evaluate_checkpoint to recover metrics.
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)