"tests/layoutlm/test_tokenization_layoutlm.py" did not exist on "32dbb2d954d646f3307f66c889c6c418a40acf88"
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""

import collections
import inspect
import math
import os
import re
import shutil
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler

from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import WEIGHTS_NAME, is_datasets_available, is_in_notebook, is_torch_tpu_available
from .integrations import (
    default_hp_search_backend,
    hp_params,
    is_comet_available,
    is_optuna_available,
    is_ray_available,
    is_tensorboard_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
)
from .modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .modeling_utils import PreTrainedModel
from .optimization import AdamW, get_linear_schedule_with_warmup
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
    CallbackHandler,
    DefaultFlowCallback,
    PrinterCallback,
    ProgressCallback,
    TrainerCallback,
    TrainerControl,
    TrainerState,
)
from .trainer_pt_utils import (
    DistributedTensorGatherer,
    SequentialDistributedSampler,
    distributed_broadcast_scalars,
    distributed_concat,
    get_tpu_sampler,
    nested_concat,
    nested_detach,
    nested_numpify,
    nested_xla_mesh_reduce,
    reissue_pt_warnings,
)
from .trainer_utils import (
    PREFIX_CHECKPOINT_DIR,
    BestRun,
    EvalPrediction,
    HPSearchBackend,
    PredictionOutput,
    TrainOutput,
    default_compute_objective,
    default_hp_space,
    set_seed,
)
from .training_args import TrainingArguments
from .utils import logging


_use_native_amp = False
_use_apex = False

DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback

if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback

    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback

# Check if PyTorch version >= 1.6 to switch between Native AMP and Apex
if version.parse(torch.__version__) < version.parse("1.6"):
    from .file_utils import is_apex_available

    if is_apex_available():
        from apex import amp
    _use_apex = True
else:
    _use_native_amp = True
    from torch.cuda.amp import autocast

if version.parse(torch.__version__) < version.parse("1.2"):
    _use_ddp_no_sync = False
else:
    _use_ddp_no_sync = True

if is_datasets_available():
    import datasets

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl

if is_tensorboard_available():
    from .integrations import TensorBoardCallback

    DEFAULT_CALLBACKS.append(TensorBoardCallback)

if is_wandb_available():
    from .integrations import WandbCallback

    DEFAULT_CALLBACKS.append(WandbCallback)

if is_comet_available():
    from .integrations import CometCallback

    DEFAULT_CALLBACKS.append(CometCallback)

if is_optuna_available():
    import optuna

if is_ray_available():
    from ray import tune

logger = logging.get_logger(__name__)


class Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for 🤗 Transformers.

    Args:
        model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
            The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.

            .. note::

                :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
                provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
                they work the same way as the 🤗 Transformers models.
        args (:class:`~transformers.TrainingArguments`, `optional`):
            The arguments to tweak for training. Will default to a basic instance of :class:`~transformers.TrainingArguments`
            with the ``output_dir`` set to a directory named `tmp_trainer` in the current directory if not provided.
        data_collator (:obj:`DataCollator`, `optional`):
            The function to use to form a batch from a list of elements of :obj:`train_dataset` or
            :obj:`eval_dataset`. Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is
            provided, an instance of :class:`~transformers.DataCollatorWithPadding` otherwise.
        train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed.
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed.
        tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an
            interrupted training or reuse the fine-tuned model.
        model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
            A function that instantiates the model to be used. If provided, each call to
            :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.

            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be able to choose
            different architectures according to hyperparameters (such as layer count, sizes of inner layers, dropout probabilities, etc.).
        compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
            The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
        callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in :doc:`here <callback>`.

            If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of
            :class:`~transformers.AdamW` on your model and a scheduler given by
            :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
        kwargs:
            Deprecated keyword arguments.
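
    Example (a minimal usage sketch, not an exhaustive recipe; the checkpoint name, the ``train_dataset``,
    ``eval_dataset`` and metric below are placeholders to replace with your own)::

        import numpy as np
        from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

        model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        def compute_metrics(eval_pred):
            # eval_pred is an EvalPrediction with .predictions and .label_ids
            predictions = np.argmax(eval_pred.predictions, axis=-1)
            return {"accuracy": float((predictions == eval_pred.label_ids).mean())}

        trainer = Trainer(
            model=model,
            args=TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1),
            train_dataset=train_dataset,  # any sized torch Dataset returning dicts of tensors
            eval_dataset=eval_dataset,
            tokenizer=tokenizer,  # enables dynamic padding through DataCollatorWithPadding
            compute_metrics=compute_metrics,
        )
        trainer.train()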
    """

    def __init__(
        self,
        model: Union[PreTrainedModel, torch.nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
        model_init: Callable[[], PreTrainedModel] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        **kwargs,
    ):
        if args is None:
            logger.info("No `TrainingArguments` passed, using `output_dir=tmp_trainer`.")
            args = TrainingArguments("tmp_trainer")
        self.args = args
        # Seed must be set before instantiating the model when using model_init.
        set_seed(self.args.seed)
        assert (
            model is not None or model_init is not None
        ), "You must provide a model to use `Trainer`, either by using the `model` argument or the `model_init` argument."
        self.model_init = model_init
        self.hp_name = None
        if model is None and model_init is not None:
            model = self.call_model_init()
        self.model = model.to(args.device) if model is not None else None
        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.tokenizer = tokenizer

        self.compute_metrics = compute_metrics
        self.optimizer, self.lr_scheduler = optimizers
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        callbacks = DEFAULT_CALLBACKS if callbacks is None else DEFAULT_CALLBACKS + callbacks
        self.callback_handler = CallbackHandler(callbacks, self.model, self.optimizer, self.lr_scheduler)
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)

        # Deprecated arguments
        if "tb_writer" in kwargs:
            warnings.warn(
                "Passing `tb_writer` as a keyword argument is deprecated and won't be possible in a "
                + "future version. Use `TensorBoardCallback(tb_writer=...)` instead and pass it to the `callbacks` "
                + "argument.",
                FutureWarning,
            )
            tb_writer = kwargs.pop("tb_writer")
            self.remove_callback(TensorBoardCallback)
            self.add_callback(TensorBoardCallback(tb_writer=tb_writer))
        if "prediction_loss_only" in kwargs:
            warnings.warn(
                "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a "
                + "future version. Use `args.prediction_loss_only` instead. Setting "
                + f"`args.prediction_loss_only={kwargs['prediction_loss_only']}`.",
                FutureWarning,
            )
            self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."

        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False

        # Create output directory if needed
        if self.is_world_process_zero():
            os.makedirs(self.args.output_dir, exist_ok=True)
        if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
            # Set an xla_device flag on the model's config.
            # We'll find a more elegant way and not need to do this in the future.
            self.model.config.xla_device = True
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            self.data_collator = self.data_collator.collate_batch
            warnings.warn(
                (
                    "The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
                    + "with a `collate_batch` are deprecated and won't be supported in a future version."
                ),
                FutureWarning,
            )

        if args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")

        # Enforce rules on using datasets with no __len__
        if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")

        if is_datasets_available():
            if isinstance(train_dataset, datasets.Dataset):
                self._remove_unused_columns(self.train_dataset, description="training")
            if isinstance(eval_dataset, datasets.Dataset):
                self._remove_unused_columns(self.eval_dataset, description="evaluation")

        self.state = TrainerState()
        self.control = TrainerControl()
        # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
        # state at each call to self.log.
        self._total_flos = None
        if self.args.fp16 and _use_native_amp:
            self.scaler = torch.cuda.amp.GradScaler()
        self.hp_search_backend = None
        self.use_tune_checkpoints = False
        default_label_names = (
            ["start_positions", "end_positions"]
            if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
            else ["labels"]
        )
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)

    def add_callback(self, callback):
        """
        Add a callback to the current list of :class:`~transformers.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
               A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
               In the first case, will instantiate a member of that class.
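
        Example (a small sketch; ``MyCallback`` is a hypothetical user-defined callback)::

            from transformers import TrainerCallback

            class MyCallback(TrainerCallback):
                def on_epoch_end(self, args, state, control, **kwargs):
                    print(f"finished epoch {state.epoch}")

            trainer.add_callback(MyCallback)    # pass the class, an instance is created for you
            trainer.add_callback(MyCallback())  # or pass an already instantiated callback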
        """
        self.callback_handler.add_callback(callback)

    def pop_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformers.TrainerCallback` and return it.

        If the callback is not found, returns :obj:`None` (and no error is raised).

        Args:
           callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
               A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
               In the first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            :class:`~transformers.TrainerCallback`: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformers.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
               A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
               In the first case, will remove the first member of that class found in the list of callbacks.
        """
        self.callback_handler.remove_callback(callback)

    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        if not self.args.remove_unused_columns:
            return
        # Inspect model forward signature to keep only the arguments it accepts.
        signature = inspect.signature(self.model.forward)
        signature_columns = list(signature.parameters.keys())
        # Labels may be named label or label_ids, the default data collator handles that.
        signature_columns += ["label", "label_ids"]
        columns = [k for k in signature_columns if k in dataset.column_names]
        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
        dset_description = "" if description is None else f"in the {description} set "
        logger.info(
            f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
        )
        dataset.set_format(type=dataset.format["type"], columns=columns)

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        if not isinstance(self.train_dataset, collections.abc.Sized):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def get_train_dataloader(self) -> DataLoader:
        """
        Returns the training :class:`~torch.utils.data.DataLoader`.

        Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler
        (adapted to distributed training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
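
        Example (a minimal sketch of such an override plugging in a weighted sampler; ``self.sample_weights`` is a
        hypothetical attribute holding one weight per training example)::

            from torch.utils.data import DataLoader, WeightedRandomSampler

            class WeightedTrainer(Trainer):
                def get_train_dataloader(self):
                    sampler = WeightedRandomSampler(self.sample_weights, num_samples=len(self.train_dataset))
                    return DataLoader(
                        self.train_dataset,
                        batch_size=self.args.train_batch_size,
                        sampler=sampler,
                        collate_fn=self.data_collator,
                    )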
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        train_sampler = self._get_train_sampler()

        return DataLoader(
            self.train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=train_sampler,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
        )

    def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
        if is_torch_tpu_available():
            return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
        elif self.args.local_rank != -1:
            return SequentialDistributedSampler(eval_dataset)
        else:
            return SequentialSampler(eval_dataset)

    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
        """
        Returns the evaluation :class:`~torch.utils.data.DataLoader`.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
                If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
                accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")
        elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
            self._remove_unused_columns(eval_dataset, description="evaluation")
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        eval_sampler = self._get_eval_sampler(eval_dataset)

        return DataLoader(
            eval_dataset,
            sampler=eval_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
        )

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """
        Returns the test :class:`~torch.utils.data.DataLoader`.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (:obj:`torch.utils.data.dataset.Dataset`):
                The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
        """
        if not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")
        elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
            self._remove_unused_columns(test_dataset, description="test")
        test_sampler = self._get_eval_sampler(test_dataset)

        # We use the same batch_size as for eval.
        return DataLoader(
            test_dataset,
            sampler=test_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
        )

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or subclass and override this method.
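
        Example (an illustrative sketch of such a subclass, switching to SGD with a constant schedule)::

            import torch
            from transformers.optimization import get_constant_schedule

            class SGDTrainer(Trainer):
                def create_optimizer_and_scheduler(self, num_training_steps: int):
                    if self.optimizer is None:
                        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.learning_rate)
                    if self.lr_scheduler is None:
                        self.lr_scheduler = get_constant_schedule(self.optimizer)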
        """
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            self.optimizer = AdamW(
                optimizer_grouped_parameters,
                lr=self.args.learning_rate,
                betas=(self.args.adam_beta1, self.args.adam_beta2),
                eps=self.args.adam_epsilon,
            )
        if self.lr_scheduler is None:
            self.lr_scheduler = get_linear_schedule_with_warmup(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )

    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.

        Will raise an exception if the underlying dataset does not implement the :obj:`__len__` method.
        """
        return len(dataloader.dataset)

    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """ HP search setup code """
        self._trial = trial

        if self.hp_search_backend is None or trial is None:
            return

        params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
        for key, value in params.items():
            if not hasattr(self.args, key):
                raise AttributeError(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
                )
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)
            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info("Trial: %s", trial.params)

    def _report_to_hp_search(
        self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
    ):
        if self.hp_search_backend is None or trial is None:
            return
        self.objective = self.compute_objective(metrics.copy())
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            trial.report(self.objective, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
            if self.state.global_step % self.args.save_steps == 0:
                self._tune_save_checkpoint()
            tune.report(objective=self.objective, **metrics)

    def _tune_save_checkpoint(self):
        if not self.use_tune_checkpoints:
            return
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
            self.args.output_dir = checkpoint_dir
            output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
            self.save_model(output_dir)
            if self.is_world_master():
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))

    def call_model_init(self, trial=None):
        model_init_argcount = len(inspect.signature(self.model_init).parameters)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")

        return model

    def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None):
        """
        Main training entry point.

        Args:
            model_path (:obj:`str`, `optional`):
                Local path to the model if the model to train has been instantiated from a local path. If present,
                training will resume from the optimizer/scheduler states loaded here.
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
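
        Example (a brief sketch; ``"output_dir/checkpoint-500"`` stands for any checkpoint folder written by a
        previous run)::

            # fresh training run
            trainer.train()

            # resume training: the optimizer, scheduler and trainer states saved in that folder are restored
            trainer.train(model_path="output_dir/checkpoint-500")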
        """
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)

        # Model re-init
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(self.args.seed)

            model = self.call_model_init(trial)

            self.model = model.to(self.args.device)

            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None

        # Keep track of whether we can call len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)

        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()

        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if self.args.max_steps > 0:
                max_steps = self.args.max_steps
                num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
                    self.args.max_steps % num_update_steps_per_epoch > 0
                )
            else:
                max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(self.args.num_train_epochs)
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = self.args.max_steps
            num_train_epochs = 1
            num_update_steps_per_epoch = max_steps

        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None

        # Check if saved optimizer or scheduler states exist
        if (
            model_path is not None
            and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
            and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            self.optimizer.load_state_dict(
                torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
            )
            with warnings.catch_warnings(record=True) as caught_warnings:
                self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
            reissue_pt_warnings(caught_warnings)

        # Mixed precision training with apex (torch < 1.6)
        model = self.model
        if self.args.fp16 and _use_apex:
            if not is_apex_available():
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)

        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=(
                    not getattr(model.config, "gradient_checkpointing", False)
                    if isinstance(model, PreTrainedModel)
                    else True
                ),
            )
        # find_unused_parameters breaks checkpointing as per
        # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021

        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
        else:
            total_train_batch_size = (
                self.args.train_batch_size
                * self.args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )

        num_examples = (
            self.num_examples(train_dataloader)
            if train_dataset_is_sized
            else total_train_batch_size * self.args.max_steps
        )

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", num_examples)
        logger.info("  Num Epochs = %d", num_train_epochs)
        logger.info("  Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
        logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
        logger.info("  Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
        logger.info("  Total optimization steps = %d", max_steps)

        self.state.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0

        # Check if continuing training from a checkpoint
        if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
            self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)

            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info("  Continuing training from epoch %d", epochs_trained)
            logger.info("  Continuing training from global step %d", self.state.global_step)
            logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        self.state.trial_params = hp_params(trial) if trial is not None else None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()

        tr_loss = torch.tensor(0.0).to(self.args.device)
        self._logging_loss_scalar = 0
        self._total_flos = self.state.total_flos
        model.zero_grad()

        self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)

        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)

            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                    self.args.device
                )
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader

            # Reset the past mems state at the beginning of each epoch if necessary.
            if self.args.past_index >= 0:
                self._past = None

            steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
            self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)

            for step, inputs in enumerate(epoch_iterator):

                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue

                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)

                if (
                    ((step + 1) % self.args.gradient_accumulation_steps != 0)
                    and self.args.local_rank != -1
                    and _use_ddp_no_sync
                ):
                    with model.no_sync():
                        tr_loss += self.training_step(model, inputs)
                else:
                    tr_loss += self.training_step(model, inputs)
                self._total_flos += self.floating_point_ops(inputs)

                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    steps_in_epoch <= self.args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
                ):
                    if self.args.fp16 and _use_native_amp:
                        self.scaler.unscale_(self.optimizer)
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
                    elif self.args.fp16 and _use_apex:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)

                    if is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.args.fp16 and _use_native_amp:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        self.optimizer.step()

                    self.lr_scheduler.step()
                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)

                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break

            self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

            if self.args.tpu_metrics_debug or self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
            if isinstance(model, PreTrainedModel):
                self.model = model.from_pretrained(self.state.best_model_checkpoint)
                self.model = self.model.to(self.args.device)
            else:
                state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
                self.model.load_state_dict(state_dict)

        self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)

        return TrainOutput(self.state.global_step, tr_loss.item() / self.state.global_step)

    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = tr_loss.item()
            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / self.args.logging_steps
            # backward compatibility for pytorch schedulers
            logs["learning_rate"] = (
                self.lr_scheduler.get_last_lr()[0]
                if version.parse(torch.__version__) >= version.parse("1.4")
                else self.lr_scheduler.get_lr()[0]
            )
            self._logging_loss_scalar = tr_loss_scalar

            self.log(logs)

        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate()
            self._report_to_hp_search(trial, epoch, metrics)

            self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)

        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def _save_checkpoint(self, model, trial, metrics=None):
        # In all cases (even distributed/parallel), self.model is always a reference
        # to the model we want to save.
        if hasattr(model, "module"):
            assert model.module is self.model, f"Module {model.module} should be a reference to self.model"
        else:
            assert model is self.model, f"Model {model} should be a reference to self.model"
        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        if self.hp_search_backend is not None and trial is not None:
            run_id = trial.number if self.hp_search_backend == HPSearchBackend.OPTUNA else tune.get_trial_id()
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
        else:
            output_dir = os.path.join(self.args.output_dir, checkpoint_folder)

        self.store_flos()
        self.save_model(output_dir)

        # Save optimizer and scheduler
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
        elif self.is_world_process_zero():
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)

        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir

        # Save the Trainer state
        if self.is_world_process_zero():
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

        # Maybe delete some older checkpoints.
        if self.is_world_process_zero():
            self._rotate_checkpoints(use_mtime=True)

    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
        compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: str = "minimize",
        backend: Optional[Union["str", HPSearchBackend]] = None,
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
        **kwargs
    ) -> BestRun:
        """
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
        :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
        the sum of all metrics otherwise.

        .. warning::

            To use this method, you need to have provided a ``model_init`` when initializing your
            :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
            with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
            method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.

        Args:
            hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
                A function that defines the hyperparameter search space. Will default to
                :func:`~transformers.trainer_utils.default_hp_space_optuna` or
                :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
            compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
                A function computing the objective to minimize or maximize from the metrics returned by the
                :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
                The number of trial runs to test.
            direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize a greater or lower objective. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
                pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
                several metrics.
            backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
                one is installed. If both are installed, will default to optuna.
            kwargs:
                Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
                more information see:

963
                - the documentation of `optuna.create_study <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
                - the documentation of `tune.run <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__

        Returns:
            :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
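
        Example (a minimal sketch using the optuna backend; the trainer must have been created with a ``model_init``,
        and the search space below is purely illustrative)::

            def my_hp_space(trial):
                return {
                    "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
                    "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 4),
                }

            best_run = trainer.hyperparameter_search(
                hp_space=my_hp_space,
                n_trials=10,
                direction="minimize",
                backend="optuna",
            )
            print(best_run.hyperparameters)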
        """
        if backend is None:
            backend = default_hp_search_backend()
            if backend is None:
                raise RuntimeError(
                    "At least one of optuna or ray should be installed. "
                    "To install optuna run `pip install optuna`."
                    "To install ray run `pip install ray[tune]`."
                )
        backend = HPSearchBackend(backend)
        if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
            raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
        if backend == HPSearchBackend.RAY and not is_ray_available():
            raise RuntimeError(
                "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
            )
        self.hp_search_backend = backend
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )

        self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
        self.hp_name = hp_name
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective

        run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
        best_run = run_hp_search(self, n_trials, direction, **kwargs)

        self.hp_search_backend = None
        return best_run

    def log(self, logs: Dict[str, float]) -> None:
        """
        Log :obj:`logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (:obj:`Dict[str, float]`):
                The values to log.
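
        Example (an illustrative sketch of a subclass override; the extra ``"run_name"`` key is hypothetical)::

            class MyTrainer(Trainer):
                def log(self, logs):
                    # Tag every log entry before handing it to the default logging/callback machinery.
                    super().log({**logs, "run_name": "my-experiment"})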
        """
        if hasattr(self, "_log"):
            warnings.warn(
                "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
                FutureWarning,
            )
            return self._log(logs)
        if self.state.epoch is not None:
            logs["epoch"] = self.state.epoch
        if self._total_flos is not None:
            self.store_flos()
            logs["total_flos"] = self.state.total_flos

        self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
        output = {**logs, **{"step": self.state.global_step}}
        self.state.log_history.append(output)

    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        """
        Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        """
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                inputs[k] = v.to(self.args.device)

        if self.args.past_index >= 0 and self._past is not None:
            inputs["mems"] = self._past

        return inputs

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
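
        Example (an illustrative sketch of a subclass override; the periodic printing is only an example)::

            class MyTrainer(Trainer):
                def training_step(self, model, inputs):
                    loss = super().training_step(model, inputs)
                    # `loss` is already detached; only inspect it here, the backward pass ran in `super()`.
                    if self.state.global_step % 100 == 0:
                        print(f"step {self.state.global_step}: loss {loss.item():.4f}")
                    return loss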
        """
        if hasattr(self, "_training_step"):
            warnings.warn(
                "The `_training_step` method is deprecated and won't be called in a future version, define `training_step` in your subclass.",
                FutureWarning,
            )
            return self._training_step(model, inputs, self.optimizer)

        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.args.fp16 and _use_native_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.args.fp16 and _use_native_amp:
            self.scaler.scale(loss).backward()
        elif self.args.fp16 and _use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        return loss.detach()

    def compute_loss(self, model, inputs):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
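
        Example (an illustrative sketch that swaps in a plain cross-entropy loss; it assumes a classification model
        that returns logits first when no ``labels`` are passed)::

            class MyTrainer(Trainer):
                def compute_loss(self, model, inputs):
                    labels = inputs.pop("labels")
                    logits = model(**inputs)[0]
                    return torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))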
        """
        outputs = model(**inputs)
        # Save past state if it exists
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]
        # We don't use .loss here since the model may return tuples instead of ModelOutput.
        return outputs[0]

    def is_local_master(self) -> bool:
        """
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
        several machines) main process.

        .. warning::

            This method is deprecated, use :meth:`~transformers.Trainer.is_local_process_zero` instead.
        """
        warnings.warn("This method is deprecated, use `Trainer.is_local_process_zero()` instead.", FutureWarning)
        return self.is_local_process_zero()

    def is_local_process_zero(self) -> bool:
        """
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
        several machines) main process.
        """
        if is_torch_tpu_available():
            return xm.is_master_ordinal(local=True)
        else:
            return self.args.local_rank in [-1, 0]

    def is_world_master(self) -> bool:
        """
        Whether or not this process is the global main process (when training in a distributed fashion on
        several machines, this is only going to be :obj:`True` for one process).

        .. warning::

            This method is deprecated, use :meth:`~transformers.Trainer.is_world_process_zero` instead.
        """
        warnings.warn("This method is deprecated, use `Trainer.is_world_process_zero()` instead.", FutureWarning)
        return self.is_world_process_zero()

    def is_world_process_zero(self) -> bool:
        """
        Whether or not this process is the global main process (when training in a distributed fashion on
        several machines, this is only going to be :obj:`True` for one process).
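
        Example (a minimal sketch; ``trainer`` is assumed to be an existing :class:`~transformers.Trainer`)::

            if trainer.is_world_process_zero():
                print("This is only printed once, even when training with several distributed processes.")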
        """
        if is_torch_tpu_available():
            return xm.is_master_ordinal(local=False)
        else:
            return self.args.local_rank == -1 or torch.distributed.get_rank() == 0

    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.

        Will only save from the world_master process (unless on TPUs).
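
        Example (a minimal sketch; the output directory and model class are placeholders)::

            from transformers import AutoModelForSequenceClassification

            trainer.save_model("./my_model")
            model = AutoModelForSequenceClassification.from_pretrained("./my_model")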
        """

        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif self.is_world_process_zero():
            self._save(output_dir)

    def _save_tpu(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)

        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)

    def _save(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info("Saving model checkpoint to %s", output_dir)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

    def store_flos(self):
        # Storing the number of floating-point operations that went into the model
        if self._total_flos is not None:
            if self.args.local_rank != -1:
                self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
            else:
                self.state.total_flos = self._total_flos

    def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
        ordering_and_checkpoint_path = []

        glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]

        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match and regex_match.groups():
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))

        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
                checkpoints_sorted[-1],
                checkpoints_sorted[best_model_index],
            )
        return checkpoints_sorted

    def _rotate_checkpoints(self, use_mtime=False) -> None:
        if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
            return

        # Check if we should delete older checkpoint(s)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
        if len(checkpoints_sorted) <= self.args.save_total_limit:
            return

        number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
        checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
        for checkpoint in checkpoints_to_be_deleted:
            logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
            shutil.rmtree(checkpoint)

    def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]:
        """
        Run evaluation and return the resulting metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are
        task-dependent (pass it to the init :obj:`compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement
                the :obj:`__len__` method.

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
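
        Example (a minimal sketch; ``trainer`` is assumed to have been created with an ``eval_dataset`` and,
        optionally, a ``compute_metrics`` function)::

            metrics = trainer.evaluate()
            # `eval_loss` is present when the evaluation dataset provides labels.
            print(metrics["eval_loss"])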
        """
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")

        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        output = self.prediction_loop(eval_dataloader, description="Evaluation")

        self.log(output.metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        return output.metrics

    def predict(self, test_dataset: Dataset) -> PredictionOutput:
        """
        Run prediction and return the predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels.
        In that case, this method will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is a :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`.

        Returns:
            `NamedTuple`:
            predictions (:obj:`np.ndarray`):
                The predictions on :obj:`test_dataset`.
            label_ids (:obj:`np.ndarray`, `optional`):
                The labels (if the dataset contained some).
            metrics (:obj:`Dict[str, float]`, `optional`):
                The potential dictionary of metrics (if the dataset contained labels).
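
        Example (a minimal sketch for a classification task; ``trainer`` and ``test_dataset`` are assumed to exist)::

            import numpy as np

            output = trainer.predict(test_dataset)
            predicted_classes = np.argmax(output.predictions, axis=-1)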
        """
        if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")

        test_dataloader = self.get_test_dataloader(test_dataset)

        return self.prediction_loop(test_dataloader, description="Prediction")

    def prediction_loop(
        self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.
        """
        if hasattr(self, "_prediction_loop"):
            warnings.warn(
                "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
                FutureWarning,
            )
            return self._prediction_loop(dataloader, description, prediction_loss_only=prediction_loss_only)

        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )

        model = self.model
        # multi-gpu eval
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.

        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info("***** Running %s *****", description)
        logger.info("  Num examples = %d", num_examples)
        logger.info("  Batch size = %d", batch_size)
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None

        world_size = 1
        if is_torch_tpu_available():
            world_size = xm.xrt_world_size()
        elif self.args.local_rank != -1:
            world_size = torch.distributed.get_world_size()
        world_size = max(1, world_size)

        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
        labels_gatherer = DistributedTensorGatherer(world_size, num_examples)

        model.eval()

        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)

        if self.args.past_index >= 0:
            self._past = None

        self.callback_handler.eval_dataloader = dataloader

        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only)
            if loss is not None:
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, dim=0)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, dim=0)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)

            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))

                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")

        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
        labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))

        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize()
        label_ids = labels_gatherer.finalize()

        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}

        if eval_loss is not None:
            metrics["eval_loss"] = eval_loss.mean().item()

        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)

    def _gather_and_numpify(self, tensors, name):
        """
        Gather the values of `tensors` (a tensor or a list/tuple of nested tensors) and convert them to numpy arrays
        before concatenating them into `gathered`.
        """
        if tensors is None:
            return
        if is_torch_tpu_available():
            tensors = nested_xla_mesh_reduce(tensors, name)
        elif self.args.local_rank != -1:
            tensors = distributed_concat(tensors)

        return nested_numpify(tensors)

    def prediction_step(
        self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.

        Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
            A tuple with the loss, logits and labels (each being optional).
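
        Example (an illustrative sketch of a subclass override; keeping only the first logits tensor is just one
        possible customization)::

            class MyTrainer(Trainer):
                def prediction_step(self, model, inputs, prediction_loss_only):
                    loss, logits, labels = super().prediction_step(model, inputs, prediction_loss_only)
                    if isinstance(logits, tuple):
                        logits = logits[0]
                    return loss, logits, labels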
        """
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)

        with torch.no_grad():
            outputs = model(**inputs)
            if has_labels:
                loss = outputs[0].mean().detach()
                logits = outputs[1:]
            else:
                loss = None
                # Slicing so we get a tuple even if `outputs` is a `ModelOutput`.
                logits = outputs[:]
            if self.args.past_index >= 0:
                self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
                # Remove the past from the logits.
                logits = logits[: self.args.past_index - 1] + logits[self.args.past_index :]

        if prediction_loss_only:
            return (loss, None, None)

        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]

        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None

        return (loss, logits, labels)

    def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
        """
        For models that inherit from :class:`~transformers.PreTrainedModel`, uses the model's own
        :obj:`floating_point_ops` method to compute the number of floating point operations for every backward +
        forward pass. If using
        another model, either implement such a method in the model or subclass and override this method.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            :obj:`int`: The number of floating-point operations.
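
        Example (an illustrative sketch of a subclass override using the rough ``6 * parameters * tokens`` estimate;
        it assumes the batch contains an ``input_ids`` tensor)::

            class MyTrainer(Trainer):
                def floating_point_ops(self, inputs):
                    num_parameters = sum(p.numel() for p in self.model.parameters())
                    return 6 * num_parameters * inputs["input_ids"].numel()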
        """

        model = self._actual_model(self.model)

        if hasattr(model, "floating_point_ops"):
            return model.floating_point_ops(inputs)
        else:
            return 0

    @staticmethod
    def _actual_model(
        model: Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module]
    ) -> torch.nn.modules.Module:
        """

        Args:
            model: (:obj:`Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module]`):
                Model object used during training

        Returns:
            :obj:`torch.nn.modules.Module`: unwrapped module
        """
        if isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
            model = model.module
        return model