#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
from datasets import load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    PretrainedConfig,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


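# Map each GLUE task to the dataset column(s) holding its input text; the second entry is None for
# single-sentence tasks.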
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
            "value if set."
        },
    )
    max_test_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of test examples to this "
            "value if set."
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task or a training/validation file.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use as labels the column called 'label' and as pairs of sentences the
    # sentences in the columns called 'sentence1' and 'sentence2' if such columns exist, or the first two columns not
    # named 'label' if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single-sentence classification on this
    # single column. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.task_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset("glue", data_args.task_name)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"Loading a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            datasets = load_dataset("csv", data_files=data_files)
        else:
            # Loading a dataset from local json files
            datasets = load_dataset("json", data_files=data_files)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    if data_args.task_name is not None:
        is_regression = data_args.task_name == "stsb"
        if not is_regression:
            label_list = datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
            label_list = datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
            num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
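    # `from_tf` loads a TensorFlow checkpoint into the PyTorch model when the path points to a ".ckpt" file.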
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Preprocessing the datasets
    if data_args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if (
        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
        and data_args.task_name is not None
        and not is_regression
    ):
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result."
            )
    elif data_args.task_name is None and not is_regression:
        label_to_id = {v: i for i, v in enumerate(label_list)}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)

        # Map labels to IDs (not necessary for GLUE tasks)
        if label_to_id is not None and "label" in examples:
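            # -1 is the placeholder used for unlabeled (test) examples, so it is kept as-is.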
            result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
        return result

    if training_args.do_train:
        if "train" not in datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        train_dataset = train_dataset.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
        )

    if training_args.do_eval:
        if "validation" not in datasets and "validation_matched" not in datasets:
            raise ValueError("--do_eval requires a validation dataset")
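        # MNLI ships two validation sets; evaluate on the matched split here, the mismatched split is added below.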
        eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
        if data_args.max_val_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
        eval_dataset = eval_dataset.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
        )

    if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
        if "test" not in datasets and "test_matched" not in datasets:
            raise ValueError("--do_predict requires a test dataset")
        test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
        if data_args.max_test_samples is not None:
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
        test_dataset = test_dataset.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
        )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # Get the metric function
    if data_args.task_name is not None:
        metric = load_metric("glue", data_args.task_name)
    # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
    # compute_metrics

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping strings to floats.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
        if data_args.task_name is not None:
            result = metric.compute(predictions=preds, references=p.label_ids)
            if len(result) > 1:
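                # Some tasks report several metrics (e.g. accuracy and F1); average them into one number for convenience.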
                result["combined_score"] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
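        # Under fp16, pad to a multiple of 8 so tensor shapes stay friendly to NVIDIA Tensor Cores.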
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            # Check the config from that potential checkpoint has the right number of labels before using it as a
            # checkpoint.
            if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels:
                checkpoint = model_args.model_name_or_path

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        eval_datasets = [eval_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            eval_datasets.append(datasets["validation_mismatched"])

        for eval_dataset, task in zip(eval_datasets, tasks):
            metrics = trainer.evaluate(eval_dataset=eval_dataset)

            max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
            metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

            trainer.log_metrics("eval", metrics)
            trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Test ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        test_datasets = [test_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            test_datasets.append(datasets["test_mismatched"])

        for test_dataset, task in zip(test_datasets, tasks):
            # Removing the `label` column because it contains -1 and Trainer won't like that.
            test_dataset.remove_columns_("label")
            predictions = trainer.predict(test_dataset=test_dataset).predictions
            predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)

            output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
            if trainer.is_world_process_zero():
                with open(output_test_file, "w") as writer:
                    logger.info(f"***** Test results {task} *****")
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions):
                        if is_regression:
                            writer.write(f"{index}\t{item:3.3f}\n")
                        else:
                            item = label_list[item]
                            writer.write(f"{index}\t{item}\n")


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()