#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
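
"""
Fine-tune a sequence-to-sequence model (e.g. BART, T5, mBART) for summarization or translation
with ``Seq2SeqTrainer``. Command-line options are defined by the ``ModelArguments``,
``DataTrainingArguments`` and ``Seq2SeqTrainingArguments`` dataclasses parsed in ``main()``.
"""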

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    max_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total sequence length for target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    eval_max_length: Optional[int] = field(
        default=142,
        metadata={
            "help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
            " This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``"
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics

    Args:
    - split: one of train, val, test
    - metrics: metrics dict
    - output_dir: where to save the metrics
    """

    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
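    # Illustrative invocation (model name and paths are placeholders, not a recommendation):
    #   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
    #       --output_dir ./output --do_train --do_eval --predict_with_generate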

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

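    # check_output_dir (from utils) guards against silently overwriting a non-empty output_dir
    # when --overwrite_output_dir is not set.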
    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
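    # If the training arguments define any of these regularization values, copy them onto the model
    # config so they take effect (getattr defaults to None for argument sets that omit them).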
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params (e.g. prefix, num_beams, max_length) stored in the model config
    # under `task_specific_params`
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart: mBART starts generation with the target language code
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
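    # Seq2SeqDataset (from utils) expects plain-text files named {split}.source / {split}.target
    # (e.g. train.source, train.target) inside data_dir.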
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.eval_max_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.eval_max_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
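    # Generation metrics (ROUGE for summarization, BLEU for translation, built by
    # build_compute_metrics_fn) are only computed when --predict_with_generate is set;
    # otherwise only the loss is reported.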
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(tokenizer, data_args, training_args.tpu_num_cores),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

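    # Metrics from every phase are collected here and written to all_results.json at the end.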
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

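        # model_path is only passed when model_name_or_path is a local directory, so training can
        # resume from a checkpoint saved there.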
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(
            metric_key_prefix="val", max_length=data_args.eval_max_length, num_beams=data_args.eval_beams
        )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():

            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(
            test_dataset=test_dataset,
            metric_key_prefix="test",
            max_length=data_args.eval_max_length,
            num_beams=data_args.eval_beams,
        )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()