# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""

import collections
import inspect
import math
import os
import re
import shutil
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler

from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import WEIGHTS_NAME, is_datasets_available, is_in_notebook, is_torch_tpu_available
from .integrations import (
    default_hp_search_backend,
    hp_params,
    is_comet_available,
    is_mlflow_available,
    is_optuna_available,
    is_ray_available,
    is_tensorboard_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
)
from .modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .modeling_utils import PreTrainedModel
from .optimization import AdamW, get_linear_schedule_with_warmup
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
    CallbackHandler,
    DefaultFlowCallback,
    PrinterCallback,
    ProgressCallback,
    TrainerCallback,
    TrainerControl,
    TrainerState,
)
from .trainer_pt_utils import (
    DistributedTensorGatherer,
    SequentialDistributedSampler,
    distributed_broadcast_scalars,
    distributed_concat,
    get_tpu_sampler,
    nested_concat,
    nested_detach,
    nested_numpify,
    nested_xla_mesh_reduce,
    reissue_pt_warnings,
)
from .trainer_utils import (
    PREFIX_CHECKPOINT_DIR,
    BestRun,
    EvalPrediction,
    HPSearchBackend,
    PredictionOutput,
    TrainOutput,
    default_compute_objective,
    default_hp_space,
    set_seed,
)
from .training_args import TrainingArguments
from .utils import logging


_use_native_amp = False
_use_apex = False

DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback

if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback

    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback

# Check if PyTorch version >= 1.6 to switch between Native AMP and Apex
if version.parse(torch.__version__) < version.parse("1.6"):
    from .file_utils import is_apex_available

    if is_apex_available():
        from apex import amp
    _use_apex = True
else:
    _use_native_amp = True
    from torch.cuda.amp import autocast

if version.parse(torch.__version__) < version.parse("1.2"):
    _use_ddp_no_sync = False
else:
    _use_ddp_no_sync = True

if is_datasets_available():
    import datasets

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl

if is_tensorboard_available():
    from .integrations import TensorBoardCallback

    DEFAULT_CALLBACKS.append(TensorBoardCallback)


if is_wandb_available():
    from .integrations import WandbCallback

    DEFAULT_CALLBACKS.append(WandbCallback)

if is_comet_available():
    from .integrations import CometCallback

    DEFAULT_CALLBACKS.append(CometCallback)

if is_mlflow_available():
    from .integrations import MLflowCallback

    DEFAULT_CALLBACKS.append(MLflowCallback)

if is_optuna_available():
    import optuna

if is_ray_available():
    from ray import tune

logger = logging.get_logger(__name__)


class Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for 🤗 Transformers.

    Args:
        model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
            The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.

            .. note::

                :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
                provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
                they work the same way as the 🤗 Transformers models.
        args (:class:`~transformers.TrainingArguments`, `optional`):
            The arguments to tweak for training. Will default to a basic instance of :class:`~transformers.TrainingArguments`
            with the ``output_dir`` set to a directory named `tmp_trainer` in the current directory if not provided.
        data_collator (:obj:`DataCollator`, `optional`):
            The function to use to form a batch from a list of elements of :obj:`train_dataset` or
            :obj:`eval_dataset`. Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is
            provided, an instance of :func:`~transformers.DataCollatorWithPadding` otherwise.
        train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed.
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed.
        tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, it will be used to automatically pad the inputs to
            the maximum length when batching inputs, and it will be saved along with the model to make it easier to
            rerun an interrupted training or reuse the fine-tuned model.
        model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
            A function that instantiates the model to be used. If provided, each call to
            :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.

            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
            able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
            layers, dropout probabilities, etc.).
        compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
            The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
        callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in :doc:`here <callback>`.

            If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of
            :class:`~transformers.AdamW` on your model and a scheduler given by
            :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
        kwargs:
            Deprecated keyword arguments.
    """

    def __init__(
        self,
        model: Union[PreTrainedModel, torch.nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
        model_init: Callable[[], PreTrainedModel] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        **kwargs,
    ):
        if args is None:
            logger.info("No `TrainingArguments` passed, using the current path as `output_dir`.")
            args = TrainingArguments("tmp_trainer")
        self.args = args
        # Seed must be set before instantiating the model when using model_init.
        set_seed(self.args.seed)
        assert (
            model is not None or model_init is not None
        ), "You must provide a model to use `Trainer`, either by using the `model` argument or the `model_init` argument."
        self.model_init = model_init
        self.hp_name = None
        if model is None and model_init is not None:
            model = self.call_model_init()
        self.model = model.to(args.device) if model is not None else None
        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.tokenizer = tokenizer

        self.compute_metrics = compute_metrics
        self.optimizer, self.lr_scheduler = optimizers
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument."
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        callbacks = DEFAULT_CALLBACKS if callbacks is None else DEFAULT_CALLBACKS + callbacks
        self.callback_handler = CallbackHandler(callbacks, self.model, self.optimizer, self.lr_scheduler)
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)

        # Deprecated arguments
        if "tb_writer" in kwargs:
            warnings.warn(
                "Passing `tb_writer` as a keyword argument is deprecated and won't be possible in a "
                + "future version. Use `TensorBoardCallback(tb_writer=...)` instead and pass it to the `callbacks`"
                + "argument",
                FutureWarning,
            )
            tb_writer = kwargs.pop("tb_writer")
            self.remove_callback(TensorBoardCallback)
            self.add_callback(TensorBoardCallback(tb_writer=tb_writer))
        if "prediction_loss_only" in kwargs:
            warnings.warn(
                "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a "
                + "future version. Use `args.prediction_loss_only` instead. Setting "
                + f"`args.prediction_loss_only={kwargs['prediction_loss_only']}",
                FutureWarning,
            )
            self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."

        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False

        # Create output directory if needed
        if self.is_world_process_zero():
            os.makedirs(self.args.output_dir, exist_ok=True)
        if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
            # Set an xla_device flag on the model's config.
            # We'll find a more elegant way and not need to do this in the future.
            self.model.config.xla_device = True
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            self.data_collator = self.data_collator.collate_batch
            warnings.warn(
                (
                    "The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
                    + "with a `collate_batch` are deprecated and won't be supported in a future version."
                ),
                FutureWarning,
            )

        if args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")

        # Enforce rules on using datasets with no __len__
        if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")

        if is_datasets_available():
            if isinstance(train_dataset, datasets.Dataset):
                self._remove_unused_columns(self.train_dataset, description="training")
            if isinstance(eval_dataset, datasets.Dataset):
                self._remove_unused_columns(self.eval_dataset, description="evaluation")

        self.state = TrainerState()
        self.control = TrainerControl()
        # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
        # state at each call to self.log.
        self._total_flos = None
        if self.args.fp16 and _use_native_amp:
            self.scaler = torch.cuda.amp.GradScaler()
        self.hp_search_backend = None
        self.use_tune_checkpoints = False
        default_label_names = (
            ["start_positions, end_positions"]
            if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
            else ["labels"]
        )
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)

    def add_callback(self, callback):
        """
        Add a callback to the current list of :class:`~transformer.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will instantiate a member of that class.
        """
        self.callback_handler.add_callback(callback)

    def pop_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.

        If the callback is not found, returns :obj:`None` (and no error is raised).

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            :class:`~transformer.TrainerCallback`: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformer.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will remove the first member of that class found in the list of callbacks.
        """
        self.callback_handler.remove_callback(callback)
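
    # A hedged sketch of how these helpers are typically used (`trainer` is an existing
    # Trainer instance; `PrinterCallback` is one of the callbacks defined in this library):
    #
    #     trainer.add_callback(PrinterCallback)            # pass a class, an instance is created
    #     printer = trainer.pop_callback(PrinterCallback)  # remove it and get the instance back
    #     trainer.remove_callback(PrinterCallback)         # or drop it without keeping a reference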

    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        if not self.args.remove_unused_columns:
            return
        # Inspect model forward signature to keep only the arguments it accepts.
        signature = inspect.signature(self.model.forward)
        signature_columns = list(signature.parameters.keys())
        # Labels may be named label or label_ids, the default data collator handles that.
        signature_columns += ["label", "label_ids"]
        columns = [k for k in signature_columns if k in dataset.column_names]
        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
        dset_description = "" if description is None else f"in the {description} set "
        logger.info(
            f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
        )
        dataset.set_format(type=dataset.format["type"], columns=columns)
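
    # For illustration (a hedged note, not executed): with a `BertForSequenceClassification`
    # model, `inspect.signature(self.model.forward).parameters` contains names such as
    # `input_ids`, `attention_mask`, `token_type_ids` and `labels`, so a leftover raw `text`
    # column would be dropped here when `remove_unused_columns` is enabled.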

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        if not isinstance(self.train_dataset, collections.abc.Sized):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def get_train_dataloader(self) -> DataLoader:
        """
        Returns the training :class:`~torch.utils.data.DataLoader`.

        Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler
        (adapted to distributed training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        train_sampler = self._get_train_sampler()

        return DataLoader(
            self.train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=train_sampler,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
        )
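
    # A hedged sketch of the "subclass and override" pattern suggested above; the
    # `WeightedRandomSampler` and the `sample_weights` attribute are illustrative
    # assumptions, not part of this class:
    #
    #     class WeightedTrainer(Trainer):
    #         def get_train_dataloader(self) -> DataLoader:
    #             sampler = torch.utils.data.WeightedRandomSampler(
    #                 weights=self.train_dataset.sample_weights,
    #                 num_samples=len(self.train_dataset),
    #             )
    #             return DataLoader(
    #                 self.train_dataset,
    #                 batch_size=self.args.train_batch_size,
    #                 sampler=sampler,
    #                 collate_fn=self.data_collator,
    #             )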

    def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
        if is_torch_tpu_available():
            return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
        elif self.args.local_rank != -1:
            return SequentialDistributedSampler(eval_dataset)
        else:
            return SequentialSampler(eval_dataset)

    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
        """
        Returns the evaluation :class:`~torch.utils.data.DataLoader`.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
                If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
                accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")
        elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
            self._remove_unused_columns(eval_dataset, description="evaluation")
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        eval_sampler = self._get_eval_sampler(eval_dataset)

        return DataLoader(
            eval_dataset,
            sampler=eval_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
        )

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """
        Returns the test :class:`~torch.utils.data.DataLoader`.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
                The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
        """
        if not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")
        elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
            self._remove_unused_columns(test_dataset, description="test")
        test_sampler = self._get_eval_sampler(test_dataset)

        # We use the same batch_size as for eval.
        return DataLoader(
            test_dataset,
            sampler=test_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
        )

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or subclass and override this method.
        """
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            self.optimizer = AdamW(
                optimizer_grouped_parameters,
                lr=self.args.learning_rate,
                betas=(self.args.adam_beta1, self.args.adam_beta2),
                eps=self.args.adam_epsilon,
            )
        if self.lr_scheduler is None:
            self.lr_scheduler = get_linear_schedule_with_warmup(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
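
    # A hedged sketch of overriding this method in a subclass to swap in a different optimizer,
    # as the docstring above suggests (plain torch SGD here; the default scheduler is kept):
    #
    #     class SGDTrainer(Trainer):
    #         def create_optimizer_and_scheduler(self, num_training_steps: int):
    #             if self.optimizer is None:
    #                 self.optimizer = torch.optim.SGD(
    #                     self.model.parameters(), lr=self.args.learning_rate, momentum=0.9
    #                 )
    #             if self.lr_scheduler is None:
    #                 self.lr_scheduler = get_linear_schedule_with_warmup(
    #                     self.optimizer,
    #                     num_warmup_steps=self.args.warmup_steps,
    #                     num_training_steps=num_training_steps,
    #                 )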

    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.

        Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
        """
        return len(dataloader.dataset)

    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """ HP search setup code """
        self._trial = trial

        if self.hp_search_backend is None or trial is None:
            return

        params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
        for key, value in params.items():
            if not hasattr(self.args, key):
                raise AttributeError(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
                )
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)
            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info("Trial:", trial.params)

    def _report_to_hp_search(
        self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
    ):
        if self.hp_search_backend is None or trial is None:
            return
        self.objective = self.compute_objective(metrics.copy())
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            trial.report(self.objective, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
            if self.state.global_step % self.args.save_steps == 0:
                self._tune_save_checkpoint()
            tune.report(objective=self.objective, **metrics)

    def _tune_save_checkpoint(self):
        if not self.use_tune_checkpoints:
            return
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
            self.args.output_dir = checkpoint_dir
            output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
            self.save_model(output_dir)
            if self.is_world_master():
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))

    def call_model_init(self, trial=None):
        model_init_argcount = len(inspect.signature(self.model_init).parameters)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")

        return model

    def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None):
        """
        Main training entry point.

        Args:
            model_path (:obj:`str`, `optional`):
                Local path to the model if the model to train has been instantiated from a local path. If present,
                training will resume from the optimizer/scheduler states loaded here.
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
        """
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)

        # Model re-init
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(self.args.seed)

            model = self.call_model_init(trial)

            self.model = model.to(self.args.device)

            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None

        # Keep track of whether we can call len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)

        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()

        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if self.args.max_steps > 0:
                max_steps = self.args.max_steps
                num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
                    self.args.max_steps % num_update_steps_per_epoch > 0
                )
            else:
                max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(self.args.num_train_epochs)
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = self.args.max_steps
            num_train_epochs = 1
            num_update_steps_per_epoch = max_steps

        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None

        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(model_path)

        # Mixed precision training with apex (torch < 1.6)
        model = self.model
        if self.args.fp16 and _use_apex:
            if not is_apex_available():
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)

        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=(
                    not getattr(model.config, "gradient_checkpointing", False)
                    if isinstance(model, PreTrainedModel)
                    else True
                ),
            )
        # find_unused_parameters breaks checkpointing as per
        # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021

        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
        else:
            total_train_batch_size = (
                self.args.train_batch_size
                * self.args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )

        num_examples = (
            self.num_examples(train_dataloader)
            if train_dataset_is_sized
            else total_train_batch_size * self.args.max_steps
        )

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", num_examples)
        logger.info("  Num Epochs = %d", num_train_epochs)
        logger.info("  Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
        logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
        logger.info("  Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
        logger.info("  Total optimization steps = %d", max_steps)

        self.state.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0

        # Check if continuing training from a checkpoint
        if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
            self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)

            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info("  Continuing training from epoch %d", epochs_trained)
            logger.info("  Continuing training from global step %d", self.state.global_step)
            logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        self.state.trial_params = hp_params(trial) if trial is not None else None
        # These should be the same if the state has been saved, but in case the training arguments changed, it's
        # safer to set them after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()

        tr_loss = torch.tensor(0.0).to(self.args.device)
        self._logging_loss_scalar = 0
        self._total_flos = self.state.total_flos
        model.zero_grad()

        self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)

        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)

            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                    self.args.device
                )
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader

            # Reset the past mems state at the beginning of each epoch if necessary.
            if self.args.past_index >= 0:
                self._past = None

            steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
            self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)

            for step, inputs in enumerate(epoch_iterator):

                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue

                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)

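                # Under distributed training, gradients are only all-reduced on the step that
                # performs the optimizer update; the intermediate accumulation steps run inside
                # `model.no_sync()` to skip that redundant communication.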
                if (
                    ((step + 1) % self.args.gradient_accumulation_steps != 0)
                    and self.args.local_rank != -1
                    and _use_ddp_no_sync
                ):
                    with model.no_sync():
                        tr_loss += self.training_step(model, inputs)
                else:
                    tr_loss += self.training_step(model, inputs)
                self._total_flos += self.floating_point_ops(inputs)

                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                    # or it is the last step in the epoch and the epoch has fewer steps than gradient_accumulation_steps
                    steps_in_epoch <= self.args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
                ):
                    if self.args.fp16 and _use_native_amp:
                        self.scaler.unscale_(self.optimizer)
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
                    elif self.args.fp16 and _use_apex:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)

                    if is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.args.fp16 and _use_native_amp:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        self.optimizer.step()

                    self.lr_scheduler.step()
                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)

                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break

            self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

            if self.args.tpu_metrics_debug or self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
            if isinstance(model, PreTrainedModel):
                self.model = model.from_pretrained(self.state.best_model_checkpoint)
                self.model = self.model.to(self.args.device)
            else:
                state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
                self.model.load_state_dict(state_dict)

        if self._total_flos is not None:
            self.store_flos()
            self.log({"total_flos": self.state.total_flos})

        self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)

        return TrainOutput(self.state.global_step, tr_loss.item() / self.state.global_step)

    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = tr_loss.item()
            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / self.args.logging_steps
            # backward compatibility for pytorch schedulers
            logs["learning_rate"] = (
                self.lr_scheduler.get_last_lr()[0]
                if version.parse(torch.__version__) >= version.parse("1.4")
                else self.lr_scheduler.get_lr()[0]
            )
            self._logging_loss_scalar = tr_loss_scalar

            self.log(logs)

        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate()
            self._report_to_hp_search(trial, epoch, metrics)

        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def _save_checkpoint(self, model, trial, metrics=None):
        # In all cases (even distributed/parallel), self.model is always a reference
        # to the model we want to save.
        if hasattr(model, "module"):
            assert model.module is self.model, f"Module {model.module} should be a reference to self.model"
        else:
            assert model is self.model, f"Model {model} should be a reference to self.model"
        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        if self.hp_search_backend is not None and trial is not None:
            run_id = trial.number if self.hp_search_backend == HPSearchBackend.OPTUNA else tune.get_trial_id()
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
        else:
            output_dir = os.path.join(self.args.output_dir, checkpoint_folder)

            self.store_flos()
        self.save_model(output_dir)

        # Save optimizer and scheduler
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
        elif self.is_world_process_zero():
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)

        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir

        # Save the Trainer state
        if self.is_world_process_zero():
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

        # Maybe delete some older checkpoints.
        if self.is_world_process_zero():
            self._rotate_checkpoints(use_mtime=True)

    def _load_optimizer_and_scheduler(self, model_path):
        """If optimizer and scheduler states exist, load them."""
        if (
            model_path is not None
            and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
            and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            if is_torch_tpu_available():
                # On TPU we have to take some extra precautions to properly load the states on the right device.
                optimizer_state = torch.load(os.path.join(model_path, "optimizer.pt"), map_location="cpu")
                with warnings.catch_warnings(record=True) as caught_warnings:
                    lr_scheduler_state = torch.load(os.path.join(model_path, "scheduler.pt"), map_location="cpu")
                reissue_pt_warnings(caught_warnings)

                xm.send_cpu_data_to_device(optimizer_state, self.args.device)
                xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

                self.optimizer.load_state_dict(optimizer_state)
                self.lr_scheduler.load_state_dict(lr_scheduler_state)
            else:
                self.optimizer.load_state_dict(
                    torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
                )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
                reissue_pt_warnings(caught_warnings)

    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
        compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: str = "minimize",
        backend: Optional[Union[str, HPSearchBackend]] = None,
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
        **kwargs
    ) -> BestRun:
        """
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
        :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
        provided, and the sum of all metrics otherwise.

        .. warning::

            To use this method, you need to have provided a ``model_init`` when initializing your
            :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
            with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
            method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.

        Args:
            hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
                A function that defines the hyperparameter search space. Will default to
                :func:`~transformers.trainer_utils.default_hp_space_optuna` or
                :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
            compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
                A function computing the objective to minimize or maximize from the metrics returned by the
                :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
                The number of trial runs to test.
            direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize a greater or lower objective. Can be :obj:`"minimize"` or :obj:`"maximize"`, you
                should pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing
                one or several metrics.
            backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
                one is installed. If both are installed, will default to optuna.
            kwargs:
                Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
                more information see:

                - the documentation of `optuna.create_study <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
                - the documentation of `tune.run <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__

        Returns:
            :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
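
        Example (a minimal sketch assuming the ``optuna`` backend and a :class:`~transformers.Trainer` created with a
        ``model_init``; ``my_hp_space`` and the chosen ranges are purely illustrative)::

            def my_hp_space(trial):
                return {
                    "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
                    "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
                }

            best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction="minimize")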
        """
        if backend is None:
            backend = default_hp_search_backend()
            if backend is None:
                raise RuntimeError(
                    "At least one of optuna or ray should be installed. "
                    "To install optuna run `pip install optuna`. "
                    "To install ray run `pip install ray[tune]`."
                )
        backend = HPSearchBackend(backend)
        if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
            raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
        if backend == HPSearchBackend.RAY and not is_ray_available():
            raise RuntimeError(
                "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
            )
        self.hp_search_backend = backend
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )

        self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
        self.hp_name = hp_name
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective

        run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
        best_run = run_hp_search(self, n_trials, direction, **kwargs)

        self.hp_search_backend = None
        return best_run

    def log(self, logs: Dict[str, float]) -> None:
        """
        Log :obj:`logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (:obj:`Dict[str, float]`):
                The values to log.
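
        Example of overriding (a minimal, illustrative sketch; ``MyTrainer`` and the extra key are hypothetical)::

            class MyTrainer(Trainer):
                def log(self, logs):
                    # tag every logged payload before it is dispatched to the callbacks
                    logs["my_constant"] = 1.0
                    super().log(logs)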
        """
        if hasattr(self, "_log"):
            warnings.warn(
                "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
                FutureWarning,
            )
            return self._log(logs)
        if self.state.epoch is not None:
            logs["epoch"] = self.state.epoch

        self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
        output = {**logs, **{"step": self.state.global_step}}
        self.state.log_history.append(output)

    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        """
        Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        """
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                inputs[k] = v.to(self.args.device)

        if self.args.past_index >= 0 and self._past is not None:
            inputs["mems"] = self._past

        return inputs

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
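
        Example of overriding (a minimal, illustrative sketch; ``MyTrainer`` is hypothetical)::

            class MyTrainer(Trainer):
                def training_step(self, model, inputs):
                    loss = super().training_step(model, inputs)
                    # e.g. occasionally inspect the detached loss returned by the default implementation
                    if self.state.global_step % 100 == 0:
                        print(f"step {self.state.global_step}: loss {loss.item():.4f}")
                    return loss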
        """
        if hasattr(self, "_training_step"):
            warnings.warn(
                "The `_training_step` method is deprecated and won't be called in a future version, define `training_step` in your subclass.",
                FutureWarning,
            )
            return self._training_step(model, inputs, self.optimizer)

        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.args.fp16 and _use_native_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.args.fp16 and _use_native_amp:
            self.scaler.scale(loss).backward()
        elif self.args.fp16 and _use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        return loss.detach()

    def compute_loss(self, model, inputs):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
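
        Example of overriding (a minimal, illustrative sketch; assumes a classification model whose first output is
        the logits when no ``labels`` are passed)::

            class MyTrainer(Trainer):
                def compute_loss(self, model, inputs):
                    labels = inputs.pop("labels")
                    # without labels, the first element of the model output is the logits
                    logits = model(**inputs)[0]
                    return torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))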
        """
        outputs = model(**inputs)
        # Save past state if it exists
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]
        # We don't use .loss here since the model may return tuples instead of ModelOutput.
        return outputs[0]

    def is_local_master(self) -> bool:
        """
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
        several machines) main process.

        .. warning::

            This method is deprecated, use :meth:`~transformers.Trainer.is_local_process_zero` instead.
        """
        warnings.warn("This method is deprecated, use `Trainer.is_local_process_zero()` instead.", FutureWarning)
        return self.is_local_process_zero()

    def is_local_process_zero(self) -> bool:
        """
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
        several machines) main process.
        """
        if is_torch_tpu_available():
            return xm.is_master_ordinal(local=True)
        else:
            return self.args.local_rank in [-1, 0]

    def is_world_master(self) -> bool:
        """
        Whether or not this process is the global main process (when training in a distributed fashion on
        several machines, this is only going to be :obj:`True` for one process).

        .. warning::

            This method is deprecated, use :meth:`~transformers.Trainer.is_world_process_zero` instead.
        """
        warnings.warn("This method is deprecated, use `Trainer.is_world_process_zero()` instead.", FutureWarning)
        return self.is_world_process_zero()

    def is_world_process_zero(self) -> bool:
        """
        Whether or not this process is the global main process (when training in a distributed fashion on
        several machines, this is only going to be :obj:`True` for one process).
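
        Example (a minimal sketch; assumes ``trainer`` is an instantiated :class:`~transformers.Trainer`)::

            if trainer.is_world_process_zero():
                print("Only printed once, on the main process.")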
        """
        if is_torch_tpu_available():
            return xm.is_master_ordinal(local=False)
        else:
            return self.args.local_rank == -1 or torch.distributed.get_rank() == 0

    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.

        Will only save from the main (world master) process (unless on TPU).
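
        Example (a minimal sketch; the path and ``MyModelClass`` are illustrative)::

            trainer.save_model("./my_finetuned_model")
            # the checkpoint can later be reloaded with the matching model class, e.g.:
            # model = MyModelClass.from_pretrained("./my_finetuned_model")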
        """

        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif self.is_world_process_zero():
            self._save(output_dir)

    def _save_tpu(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)

        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
        if self.tokenizer is not None and self.is_world_process_zero():
            self.tokenizer.save_pretrained(output_dir)

    def _save(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info("Saving model checkpoint to %s", output_dir)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
        if self.tokenizer is not None and self.is_world_process_zero():
            self.tokenizer.save_pretrained(output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

    def store_flos(self):
        # Storing the number of floating-point operations that went into the model
        if self._total_flos is not None:
            if self.args.local_rank != -1:
                self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
            else:
                self.state.total_flos = self._total_flos

    def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
        ordering_and_checkpoint_path = []

        glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]

        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match and regex_match.groups():
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))

        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
                checkpoints_sorted[-1],
                checkpoints_sorted[best_model_index],
            )
        return checkpoints_sorted

    def _rotate_checkpoints(self, use_mtime=False) -> None:
        if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
            return

        # Check if we should delete older checkpoint(s)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
        if len(checkpoints_sorted) <= self.args.save_total_limit:
            return

        number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
        checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
        for checkpoint in checkpoints_to_be_deleted:
            logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
            shutil.rmtree(checkpoint)

    def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]:
        """
        Run evaluation and return the metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are
        task-dependent (pass it to the init :obj:`compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement
                the :obj:`__len__` method.

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
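
        Example (a minimal sketch; assumes ``trainer`` and ``my_eval_dataset`` already exist)::

            metrics = trainer.evaluate(eval_dataset=my_eval_dataset)
            print(metrics["eval_loss"])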
        """
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")

        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        output = self.prediction_loop(eval_dataloader, description="Evaluation")

        self.log(output.metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        return output.metrics

    def predict(self, test_dataset: Dataset) -> PredictionOutput:
        """
        Run prediction and return predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels.
        In that case, this method will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is a :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`.

        Returns:
            `NamedTuple`:
            predictions (:obj:`np.ndarray`):
                The predictions on :obj:`test_dataset`.
            label_ids (:obj:`np.ndarray`, `optional`):
                The labels (if the dataset contained some).
            metrics (:obj:`Dict[str, float]`, `optional`):
                The potential dictionary of metrics (if the dataset contained labels).
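
        Example (a minimal sketch; assumes ``trainer`` and ``my_test_dataset`` already exist)::

            output = trainer.predict(my_test_dataset)
            predictions = output.predictions
            metrics = output.metrics  # only populated if the dataset contained labels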
        """
        if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")

        test_dataloader = self.get_test_dataloader(test_dataset)

        return self.prediction_loop(test_dataloader, description="Prediction")

    def prediction_loop(
        self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.
        """
        if hasattr(self, "_prediction_loop"):
            warnings.warn(
                "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
                FutureWarning,
            )
            return self._prediction_loop(dataloader, description, prediction_loss_only=prediction_loss_only)

        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )

        model = self.model
        # multi-gpu eval
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.

        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info("***** Running %s *****", description)
        logger.info("  Num examples = %d", num_examples)
        logger.info("  Batch size = %d", batch_size)
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None

        world_size = 1
        if is_torch_tpu_available():
            world_size = xm.xrt_world_size()
        elif self.args.local_rank != -1:
            world_size = torch.distributed.get_world_size()
        world_size = max(1, world_size)

        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
        labels_gatherer = DistributedTensorGatherer(world_size, num_examples)

        model.eval()

        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)

        if self.args.past_index >= 0:
            self._past = None

        self.callback_handler.eval_dataloader = dataloader

        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only)
            if loss is not None:
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, dim=0)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, dim=0)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)

            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))

                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")

        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
        labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))

        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize()
        label_ids = labels_gatherer.finalize()

        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}

        if eval_loss is not None:
            metrics["eval_loss"] = eval_loss.mean().item()

        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)

    def _gather_and_numpify(self, tensors, name):
        """
        Gather the value of `tensors` (a tensor or a list/tuple of nested tensors) across processes and convert the
        result to numpy arrays.
        """
        if tensors is None:
            return
        if is_torch_tpu_available():
            tensors = nested_xla_mesh_reduce(tensors, name)
        elif self.args.local_rank != -1:
            tensors = distributed_concat(tensors)

        return nested_numpify(tensors)

    def prediction_step(
        self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.

        Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
            A tuple with the loss, logits and labels (each being optional).
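
        Example of overriding (a minimal, illustrative sketch; ``MyTrainer`` is hypothetical)::

            class MyTrainer(Trainer):
                def prediction_step(self, model, inputs, prediction_loss_only):
                    loss, logits, labels = super().prediction_step(model, inputs, prediction_loss_only)
                    # e.g. only keep the first logits tensor when the model returns several
                    if isinstance(logits, tuple):
                        logits = logits[0]
                    return loss, logits, labels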
        """
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)

        with torch.no_grad():
            if self.args.fp16 and _use_native_amp:
                with autocast():
                    outputs = model(**inputs)
            else:
                outputs = model(**inputs)
            if has_labels:
                loss = outputs[0].mean().detach()
                logits = outputs[1:]
            else:
                loss = None
                # Slicing so we get a tuple even if `outputs` is a `ModelOutput`.
                logits = outputs[:]
            if self.args.past_index >= 0:
                self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
                # Remove the past from the logits.
                logits = logits[: self.args.past_index - 1] + logits[self.args.past_index :]

        if prediction_loss_only:
            return (loss, None, None)

        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]

        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None

        return (loss, logits, labels)

    def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
        """
        For models that inherit from :class:`~transformers.PreTrainedModel`, uses that model's
        :obj:`floating_point_ops` method to compute the number of floating point operations for every backward +
        forward pass. If using another model, either implement such a method in the model or subclass and override
        this method.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            :obj:`int`: The number of floating-point operations.
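
        Example of providing such a method on a custom model (an illustrative sketch; the ``6 * parameters * tokens``
        heuristic is a rough assumption, not an exact count)::

            class MyModel(torch.nn.Module):
                def floating_point_ops(self, inputs):
                    num_tokens = inputs["input_ids"].numel()
                    num_params = sum(p.numel() for p in self.parameters())
                    return 6 * num_tokens * num_params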
        """

        model = self._actual_model(self.model)

        if hasattr(model, "floating_point_ops"):
            return model.floating_point_ops(inputs)
        else:
            return 0

    @staticmethod
    def _actual_model(
        model: Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module]
    ) -> torch.nn.modules.Module:
        """

        Args:
            model: (:obj:`Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module]`):
                Model object used during training

        Returns:
            :obj:`torch.nn.modules.Module`: unwrapped module
        """
        if isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
            model = model.module
        return model