trainer.py 81.8 KB
Newer Older
Sylvain Gugger's avatar
Sylvain Gugger committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 馃 Transformers from scratch or finetune it on a new task.
"""

19
import collections
20
import inspect
21
import math
Julien Chaumond's avatar
Julien Chaumond committed
22
23
24
import os
import re
import shutil
25
import time
26
import warnings
Julien Chaumond's avatar
Julien Chaumond committed
27
from pathlib import Path
28
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
Julien Chaumond's avatar
Julien Chaumond committed
29
30


31
32
# Integrations must be imported before ML frameworks:
from .integrations import (  # isort: split
33
    default_hp_search_backend,
34
    get_reporting_integration_callbacks,
35
    hp_params,
36
    is_fairscale_available,
37
    is_optuna_available,
38
    is_ray_tune_available,
39
40
    run_hp_search_optuna,
    run_hp_search_ray,
41
    init_deepspeed,
42
)
43
44
45
46
47
48
49
50
51
52
53

import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler

from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
Sylvain Gugger's avatar
Sylvain Gugger committed
54
55
56
57
58
59
60
61
from .file_utils import (
    WEIGHTS_NAME,
    is_apex_available,
    is_datasets_available,
    is_in_notebook,
    is_sagemaker_distributed_available,
    is_torch_tpu_available,
)
Julien Chaumond's avatar
Julien Chaumond committed
62
from .modeling_utils import PreTrainedModel
Sylvain Gugger's avatar
Sylvain Gugger committed
63
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
Sylvain Gugger's avatar
Sylvain Gugger committed
64
from .optimization import Adafactor, AdamW, get_scheduler
65
from .tokenization_utils_base import PreTrainedTokenizerBase
Sylvain Gugger's avatar
Sylvain Gugger committed
66
67
68
69
70
71
72
73
74
75
from .trainer_callback import (
    CallbackHandler,
    DefaultFlowCallback,
    PrinterCallback,
    ProgressCallback,
    TrainerCallback,
    TrainerControl,
    TrainerState,
)
from .trainer_pt_utils import (
76
    DistributedLengthGroupedSampler,
77
    DistributedTensorGatherer,
Sylvain Gugger's avatar
Sylvain Gugger committed
78
    LabelSmoother,
79
    LengthGroupedSampler,
Sylvain Gugger's avatar
Sylvain Gugger committed
80
81
82
83
84
85
86
87
88
    SequentialDistributedSampler,
    distributed_broadcast_scalars,
    distributed_concat,
    nested_concat,
    nested_detach,
    nested_numpify,
    nested_xla_mesh_reduce,
    reissue_pt_warnings,
)
89
90
91
92
93
94
95
96
97
98
from .trainer_utils import (
    PREFIX_CHECKPOINT_DIR,
    BestRun,
    EvalPrediction,
    HPSearchBackend,
    PredictionOutput,
    TrainOutput,
    default_compute_objective,
    default_hp_space,
    set_seed,
99
    speed_metrics,
100
)
101
from .training_args import ParallelMode, TrainingArguments
Lysandre Debut's avatar
Lysandre Debut committed
102
from .utils import logging
Julien Chaumond's avatar
Julien Chaumond committed
103
104


105
_is_native_amp_available = False
106

Sylvain Gugger's avatar
Sylvain Gugger committed
107
DEFAULT_CALLBACKS = [DefaultFlowCallback]
108
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
Sylvain Gugger's avatar
Sylvain Gugger committed
109

110
111
112
113
if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback

    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
114

115
116
if is_apex_available():
    from apex import amp
117

118
if version.parse(torch.__version__) >= version.parse("1.6"):
119
    _is_native_amp_available = True
120
    from torch.cuda.amp import autocast
Julien Chaumond's avatar
Julien Chaumond committed
121

122
123
if is_datasets_available():
    import datasets
Julien Chaumond's avatar
Julien Chaumond committed
124

125
if is_torch_tpu_available():
Lysandre Debut's avatar
Lysandre Debut committed
126
127
128
129
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl

130
131
132
133
134
if is_fairscale_available():
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler

Sylvain Gugger's avatar
Sylvain Gugger committed
135
136
137
138
139
if is_sagemaker_distributed_available():
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
    import torch.distributed as dist
140
141
142
143

if TYPE_CHECKING:
    import optuna

Lysandre Debut's avatar
Lysandre Debut committed
144
logger = logging.get_logger(__name__)
Julien Chaumond's avatar
Julien Chaumond committed
145
146


147
148
149
150
151
152
153
154
def _model_unwrap(model: nn.Module) -> nn.Module:
    # since there could be multiple levels of wrapping, unwrap recursively
    if hasattr(model, "module"):
        return _model_unwrap(model.module)
    else:
        return model


Julien Chaumond's avatar
Julien Chaumond committed
155
156
class Trainer:
    """
Sylvain Gugger's avatar
Sylvain Gugger committed
157
    Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 馃 Transformers.
158
159

    Args:
Sylvain Gugger's avatar
Sylvain Gugger committed
160
        model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
161
            The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
Sylvain Gugger's avatar
Sylvain Gugger committed
162
163
164
165
166
167

            .. note::

                :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
                provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
                they work the same way as the 馃 Transformers models.
168
        args (:class:`~transformers.TrainingArguments`, `optional`):
Sylvain Gugger's avatar
Sylvain Gugger committed
169
170
171
            The arguments to tweak for training. Will default to a basic instance of
            :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
            the current directory if not provided.
172
        data_collator (:obj:`DataCollator`, `optional`):
Sylvain Gugger's avatar
Sylvain Gugger committed
173
174
175
            The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
            Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
            :func:`~transformers.DataCollatorWithPadding` otherwise.
Sylvain Gugger's avatar
Sylvain Gugger committed
176
        train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
177
            The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
178
            ``model.forward()`` method are automatically removed.
Sylvain Gugger's avatar
Sylvain Gugger committed
179
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
180
             The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
Sylvain Gugger's avatar
Sylvain Gugger committed
181
             ``model.forward()`` method are automatically removed.
182
183
184
185
        tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
            maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
            interrupted training or reuse the fine-tuned model.
186
187
188
        model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
            A function that instantiates the model to be used. If provided, each call to
            :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
189

Sylvain Gugger's avatar
Sylvain Gugger committed
190
191
            The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
            able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
Sylvain Gugger's avatar
Sylvain Gugger committed
192
193
            layers, dropout probabilities etc).
        compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
194
            The function that will be used to compute metrics at evaluation. Must take a
Sylvain Gugger's avatar
Sylvain Gugger committed
195
196
197
198
            :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
        callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in :doc:`here <callback>`.
Sylvain Gugger's avatar
Sylvain Gugger committed
199
200

            If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
Sylvain Gugger's avatar
Sylvain Gugger committed
201
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
Sylvain Gugger's avatar
Sylvain Gugger committed
202
            containing the optimizer and the scheduler to use. Will default to an instance of
203
            :class:`~transformers.AdamW` on your model and a scheduler given by
Sylvain Gugger's avatar
Sylvain Gugger committed
204
            :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
205

206
207
208
209
210
211
212
213
214
215
    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a
          :class:`~transformers.PreTrainedModel` subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
          the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
          inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
Julien Chaumond's avatar
Julien Chaumond committed
216
217
218
219
    """

    def __init__(
        self,
Sylvain Gugger's avatar
Sylvain Gugger committed
220
        model: Union[PreTrainedModel, torch.nn.Module] = None,
221
        args: TrainingArguments = None,
Julien Chaumond's avatar
Julien Chaumond committed
222
223
224
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
225
        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
226
        model_init: Callable[[], PreTrainedModel] = None,
Julien Chaumond's avatar
Julien Chaumond committed
227
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
Sylvain Gugger's avatar
Sylvain Gugger committed
228
        callbacks: Optional[List[TrainerCallback]] = None,
229
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
Julien Chaumond's avatar
Julien Chaumond committed
230
    ):
Sylvain Gugger's avatar
Sylvain Gugger committed
231
        if args is None:
232
233
234
            output_dir = "tmp_trainer"
            logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
            args = TrainingArguments(output_dir=output_dir)
Sylvain Gugger's avatar
Sylvain Gugger committed
235
236
237
        self.args = args
        # Seed must be set before instantiating the model when using model
        set_seed(self.args.seed)
238
        self.hp_name = None
239
        self.deepspeed = None
240

241
242
243
244
245
246
247
248
249
250
251
252
253
254
        if model is None:
            if model_init is not None:
                self.model_init = model_init
                model = self.call_model_init()
            else:
                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
        else:
            if model_init is not None:
                warnings.warn(
                    "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                    "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                    FutureWarning,
                )
            self.model_init = model_init
255

256
257
258
259
260
        if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
            self.is_model_parallel = True
        else:
            self.is_model_parallel = False

261
262
        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
        self.data_collator = data_collator if data_collator is not None else default_collator
Julien Chaumond's avatar
Julien Chaumond committed
263
264
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
265
        self.tokenizer = tokenizer
266

267
        # Model parallel
268
        if not self.is_model_parallel:
269
            model = model.to(args.device)
270
271
272
        else:
            # Force n_gpu to 1 to avoid DataParallel.
            self.args._n_gpu = 1
273
274
275
276
277

        # later use `self.model is self.model_wrapped` to check if it's wrapped or not
        self.model_wrapped = model
        self.model = model

Julien Chaumond's avatar
Julien Chaumond committed
278
        self.compute_metrics = compute_metrics
279
        self.optimizer, self.lr_scheduler = optimizers
Sylvain Gugger's avatar
Sylvain Gugger committed
280
281
282
283
284
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument."
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
285
286
        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
        callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
287
288
289
        self.callback_handler = CallbackHandler(
            callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
        )
290
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
Sylvain Gugger's avatar
Sylvain Gugger committed
291

292
293
294
        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False

Julien Chaumond's avatar
Julien Chaumond committed
295
        # Create output directory if needed
296
        if self.is_world_process_zero():
Julien Chaumond's avatar
Julien Chaumond committed
297
            os.makedirs(self.args.output_dir, exist_ok=True)
298
        if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
Lysandre Debut's avatar
Lysandre Debut committed
299
300
301
            # Set an xla_device flag on the model's config.
            # We'll find a more elegant and not need to do this in the future.
            self.model.config.xla_device = True
302
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
Sylvain Gugger's avatar
Sylvain Gugger committed
303
            raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
304

305
306
307
308
309
310
311
312
313
        if args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")

        # Enforce rules on using datasets with no __len__
        if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")

314
315
        if is_datasets_available():
            if isinstance(train_dataset, datasets.Dataset):
316
                self._remove_unused_columns(self.train_dataset, description="training")
317
            if isinstance(eval_dataset, datasets.Dataset):
318
319
                self._remove_unused_columns(self.eval_dataset, description="evaluation")

320
321
322
        # Setup Sharded DDP training
        self.sharded_dpp = False
        if args.sharded_ddp:
323
324
325
326
327
            if args.deepspeed:
                raise ValueError(
                    "Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags."
                )

328
329
330
331
332
333
334
            if args.local_rank == -1:
                raise ValueError("Using sharded DDP only works in distributed training.")
            elif not is_fairscale_available():
                raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
            else:
                self.sharded_dpp = True

335
336
337
        # Mixed precision setup
        self.use_apex = False
        self.use_amp = False
338
339
        self.fp16_backend = None

340
341
        if args.fp16:
            if args.fp16_backend == "auto":
342
                self.fp16_backend = "amp" if _is_native_amp_available else "apex"
343
            else:
344
345
                self.fp16_backend = args.fp16_backend
            logger.info(f"Using {self.fp16_backend} fp16 backend")
346

347
348
        if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
            if self.fp16_backend == "amp":
349
                self.use_amp = True
350
                self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()
351
352
353
354
355
356
357
            else:
                if not is_apex_available():
                    raise ImportError(
                        "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                    )
                self.use_apex = True

Sylvain Gugger's avatar
Sylvain Gugger committed
358
359
360
361
362
363
        # Label smoothing
        if self.args.label_smoothing_factor != 0:
            self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
        else:
            self.label_smoother = None

364
        self.state = TrainerState()
Sylvain Gugger's avatar
Sylvain Gugger committed
365
        self.control = TrainerControl()
366
367
368
        # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
        # state at each call to self.log.
        self._total_flos = None
369
        self.hp_search_backend = None
370
        self.use_tune_checkpoints = False
371
        default_label_names = (
372
            ["start_positions", "end_positions"]
373
374
375
376
            if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
            else ["labels"]
        )
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
Sylvain Gugger's avatar
Sylvain Gugger committed
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)

    def add_callback(self, callback):
        """
        Add a callback to the current list of :class:`~transformer.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will instantiate a member of that class.
        """
        self.callback_handler.add_callback(callback)

    def pop_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.

        If the callback is not found, returns :obj:`None` (and no error is raised).

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            :class:`~transformer.TrainerCallback`: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformer.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will remove the first member of that class found in the list of callbacks.
        """
        self.callback_handler.remove_callback(callback)
Julien Chaumond's avatar
Julien Chaumond committed
416

417
    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
418
419
        if not self.args.remove_unused_columns:
            return
420
421
422
423
424
425
426
427
428
429
430
        # Inspect model forward signature to keep only the arguments it accepts.
        signature = inspect.signature(self.model.forward)
        signature_columns = list(signature.parameters.keys())
        # Labels may be named label or label_ids, the default data collator handles that.
        signature_columns += ["label", "label_ids"]
        columns = [k for k in signature_columns if k in dataset.column_names]
        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
        dset_description = "" if description is None else f"in the {description} set "
        logger.info(
            f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
        )
sgugger's avatar
sgugger committed
431
        dataset.set_format(type=dataset.format["type"], columns=columns)
432

433
    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
434
435
436
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
            self.train_dataset, collections.abc.Sized
        ):
437
            return None
438
439
440
441
442

        # Gather the number of processes and this process index.
        if self.args.parallel_mode == ParallelMode.TPU:
            num_processes = xm.xrt_world_size()
            process_index = xm.get_ordinal()
Sylvain Gugger's avatar
Sylvain Gugger committed
443
444
445
446
447
448
        elif (
            self.args.parallel_mode == ParallelMode.DISTRIBUTED
            or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED
        ):
            num_processes = dist.get_world_size()
            process_index = dist.get_rank()
Lysandre Debut's avatar
Lysandre Debut committed
449
        else:
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
            num_processes = 1
            process_index = 0

        # Build the sampler.
        if self.args.group_by_length:
            if num_processes <= 1:
                return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)
            else:
                return DistributedLengthGroupedSampler(
                    self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index
                )

        else:
            if num_processes <= 1:
                return RandomSampler(self.train_dataset)
            else:
                return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
467
468
469
470
471

    def get_train_dataloader(self) -> DataLoader:
        """
        Returns the training :class:`~torch.utils.data.DataLoader`.

Sylvain Gugger's avatar
Sylvain Gugger committed
472
473
        Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
        to distributed training if necessary) otherwise.
474
475
476
477
478
479
480
481

        Subclass and override this method if you want to inject some custom behavior.
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        train_sampler = self._get_train_sampler()

        return DataLoader(
Julien Chaumond's avatar
Julien Chaumond committed
482
483
484
            self.train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=train_sampler,
485
            collate_fn=self.data_collator,
Setu Shah's avatar
Setu Shah committed
486
            drop_last=self.args.dataloader_drop_last,
Chady Kamar's avatar
Chady Kamar committed
487
            num_workers=self.args.dataloader_num_workers,
488
            pin_memory=self.args.dataloader_pin_memory,
Julien Chaumond's avatar
Julien Chaumond committed
489
490
        )

491
    def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
492
        if is_torch_tpu_available():
493
494
495
496
497
            return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
        elif self.args.local_rank != -1:
            return SequentialDistributedSampler(eval_dataset)
        else:
            return SequentialSampler(eval_dataset)
Lysandre Debut's avatar
Lysandre Debut committed
498

Julien Chaumond's avatar
Julien Chaumond committed
499
    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
500
501
502
        """
        Returns the evaluation :class:`~torch.utils.data.DataLoader`.

503
504
        Subclass and override this method if you want to inject some custom behavior.

505
        Args:
506
            eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
507
                If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
508
                accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
509
        """
Julien Chaumond's avatar
Julien Chaumond committed
510
511
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
512
513
514
        elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")
        elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
515
            self._remove_unused_columns(eval_dataset, description="evaluation")
516
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
517
        eval_sampler = self._get_eval_sampler(eval_dataset)
518

519
        return DataLoader(
520
            eval_dataset,
521
            sampler=eval_sampler,
Julien Chaumond's avatar
Julien Chaumond committed
522
            batch_size=self.args.eval_batch_size,
523
            collate_fn=self.data_collator,
Setu Shah's avatar
Setu Shah committed
524
            drop_last=self.args.dataloader_drop_last,
Chady Kamar's avatar
Chady Kamar committed
525
            num_workers=self.args.dataloader_num_workers,
526
            pin_memory=self.args.dataloader_pin_memory,
Julien Chaumond's avatar
Julien Chaumond committed
527
528
529
        )

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
530
531
532
        """
        Returns the test :class:`~torch.utils.data.DataLoader`.

533
534
        Subclass and override this method if you want to inject some custom behavior.

535
        Args:
536
            test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
537
                The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
538
                ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
539
        """
540
541
542
        if not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")
        elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
543
            self._remove_unused_columns(test_dataset, description="test")
544
        test_sampler = self._get_eval_sampler(test_dataset)
Lysandre Debut's avatar
Lysandre Debut committed
545

546
547
        # We use the same batch_size as for eval.
        return DataLoader(
Julien Chaumond's avatar
Julien Chaumond committed
548
            test_dataset,
549
            sampler=test_sampler,
Julien Chaumond's avatar
Julien Chaumond committed
550
            batch_size=self.args.eval_batch_size,
551
            collate_fn=self.data_collator,
552
            drop_last=self.args.dataloader_drop_last,
553
            pin_memory=self.args.dataloader_pin_memory,
Julien Chaumond's avatar
Julien Chaumond committed
554
        )
Lysandre Debut's avatar
Lysandre Debut committed
555

556
    def create_optimizer_and_scheduler(self, num_training_steps: int):
557
558
559
        """
        Setup the optimizer and the learning rate scheduler.

560
        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
561
        Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
562
        """
563
564
565
566
567
568
569
570
571
572
573
574
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
Sylvain Gugger's avatar
Sylvain Gugger committed
575
576
577
578
579
580
581
582
583
584
585
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
586
587
588
            if self.sharded_dpp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
Sylvain Gugger's avatar
Sylvain Gugger committed
589
590
                    optim=optimizer_cls,
                    **optimizer_kwargs,
591
592
                )
            else:
Sylvain Gugger's avatar
Sylvain Gugger committed
593
594
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

595
        if self.lr_scheduler is None:
Sylvain Gugger's avatar
Sylvain Gugger committed
596
597
598
599
600
            self.lr_scheduler = get_scheduler(
                self.args.lr_scheduler_type,
                self.optimizer,
                num_warmup_steps=self.args.warmup_steps,
                num_training_steps=num_training_steps,
601
            )
Julien Chaumond's avatar
Julien Chaumond committed
602

603
    def num_examples(self, dataloader: DataLoader) -> int:
604
        """
605
        Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
606
607

        Will raise an exception if the underlying dataset dese not implement method :obj:`__len__`
608
        """
609
        return len(dataloader.dataset)
610

611
612
    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """ HP search setup code """
613
614
        self._trial = trial

615
616
        if self.hp_search_backend is None or trial is None:
            return
617

618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
        params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
        for key, value in params.items():
            if not hasattr(self.args, key):
                raise AttributeError(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
                )
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)
            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info("Trial:", trial.params)

    def _report_to_hp_search(
        self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
    ):
        if self.hp_search_backend is None or trial is None:
            return
637
        self.objective = self.compute_objective(metrics.copy())
638
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
639
640
            import optuna

641
642
643
644
            trial.report(self.objective, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
645
646
            from ray import tune

647
            if self.state.global_step % self.args.save_steps == 0:
648
                self._tune_save_checkpoint()
649
650
            tune.report(objective=self.objective, **metrics)

651
    def _tune_save_checkpoint(self):
652
653
        from ray import tune

654
655
        if not self.use_tune_checkpoints:
            return
656
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
657
            self.args.output_dir = checkpoint_dir
658
            output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
659
            self.save_model(output_dir)
660
            if self.is_world_process_zero():
661
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
662
663
664
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))

665
666
667
668
669
670
671
    def call_model_init(self, trial=None):
        model_init_argcount = len(inspect.signature(self.model_init).parameters)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
672
673
674
675
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")
676
677
678

        return model

679
680
681
682
    def train(
        self,
        resume_from_checkpoint: Optional[str] = None,
        trial: Union["optuna.Trial", Dict[str, Any]] = None,
683
        **kwargs,
684
    ):
Julien Chaumond's avatar
Julien Chaumond committed
685
686
687
688
        """
        Main training entry point.

        Args:
689
690
691
            resume_from_checkpoint (:obj:`str`, `optional`):
                Local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If
                present, training will resume from the model/optimizer/scheduler states loaded here.
692
693
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
694
695
            kwargs:
                Additional keyword arguments used to hide deprecated arguments
Julien Chaumond's avatar
Julien Chaumond committed
696
        """
697
698
699
700
701
702
703
704
705
        if "model_path" in kwargs:
            resume_from_checkpoint = kwargs.pop("model_path")
            warnings.warn(
                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
                "instead.",
                FutureWarning,
            )
        if len(kwargs) > 0:
            raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
Sylvain Gugger's avatar
Sylvain Gugger committed
706
707
708
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)

709
        # Model re-init
710
        model_reloaded = False
711
        if self.model_init is not None:
Sylvain Gugger's avatar
Sylvain Gugger committed
712
713
            # Seed must be set before instantiating the model when using model_init.
            set_seed(self.args.seed)
714
715
            self.model = self.call_model_init(trial)
            model_reloaded = True
Sylvain Gugger's avatar
Sylvain Gugger committed
716
717
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None
718

719
        # Load potential model checkpoint
720
721
        if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
            logger.info(f"Loading model from {resume_from_checkpoint}).")
722
            if isinstance(self.model, PreTrainedModel):
723
                self.model = self.model.from_pretrained(resume_from_checkpoint)
724
725
                model_reloaded = True
            else:
726
                state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
727
728
729
730
731
732
733
734
                self.model.load_state_dict(state_dict)

        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if not self.is_model_parallel:
                self.model = self.model.to(self.args.device)
            self.model_wrapped = self.model

735
736
737
        # Keeping track whether we can can len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)

738
        # Data loader and number of training steps
Julien Chaumond's avatar
Julien Chaumond committed
739
        train_dataloader = self.get_train_dataloader()
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755

        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if self.args.max_steps > 0:
                max_steps = self.args.max_steps
                num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
                    self.args.max_steps % num_update_steps_per_epoch > 0
                )
            else:
                max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(self.args.num_train_epochs)
Julien Chaumond's avatar
Julien Chaumond committed
756
        else:
757
758
759
760
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = self.args.max_steps
            num_train_epochs = 1
            num_update_steps_per_epoch = max_steps
Julien Chaumond's avatar
Julien Chaumond committed
761

762
763
764
765
766
767
768
769
770
771
        if self.args.deepspeed:
            model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
            self.model = model.module
            self.model_wrapped = model  # will get further wrapped in DDP
            self.deepspeed = model  # DeepSpeedEngine object
            self.optimizer = optimizer
            self.lr_scheduler = lr_scheduler
        else:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

772
        self.state = TrainerState()
773
        self.state.is_hyper_param_search = trial is not None
Julien Chaumond's avatar
Julien Chaumond committed
774
775

        # Check if saved optimizer or scheduler states exist
776
        self._load_optimizer_and_scheduler(resume_from_checkpoint)
Julien Chaumond's avatar
Julien Chaumond committed
777

778
779
        model = self.model_wrapped

Sylvain Gugger's avatar
Sylvain Gugger committed
780
        # Mixed precision training with apex (torch < 1.6)
781
        if self.use_apex:
782
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
Julien Chaumond's avatar
Julien Chaumond committed
783

784
        # Multi-gpu training (should be after apex fp16 initialization)
785
        if self.args.n_gpu > 1:
Julien Chaumond's avatar
Julien Chaumond committed
786
787
788
            model = torch.nn.DataParallel(model)

        # Distributed training (should be after apex fp16 initialization)
789
790
        if self.sharded_dpp:
            model = ShardedDDP(model, self.optimizer)
Sylvain Gugger's avatar
Sylvain Gugger committed
791
792
        elif is_sagemaker_distributed_available():
            model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
793
        elif self.args.local_rank != -1:
794
795
796
797
798
799
800
801
            if self.args.ddp_find_unused_parameters is not None:
                find_unused_parameters = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
            else:
                find_unused_parameters = True
Julien Chaumond's avatar
Julien Chaumond committed
802
803
804
805
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
806
                find_unused_parameters=find_unused_parameters,
Julien Chaumond's avatar
Julien Chaumond committed
807
808
            )

809
810
811
812
813
814
815
816
        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model

        # important: at this point:
        # self.model         is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.

Julien Chaumond's avatar
Julien Chaumond committed
817
        # Train!
818
        if is_torch_tpu_available():
Sylvain Gugger's avatar
Sylvain Gugger committed
819
820
821
            world_size = xm.xrt_world_size()
        elif self.args.local_rank != -1:
            world_size = dist.get_world_size()
Lysandre Debut's avatar
Lysandre Debut committed
822
        else:
Sylvain Gugger's avatar
Sylvain Gugger committed
823
            world_size = 1
824

Sylvain Gugger's avatar
Sylvain Gugger committed
825
        total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
826
827
828
829
830
831
        num_examples = (
            self.num_examples(train_dataloader)
            if train_dataset_is_sized
            else total_train_batch_size * self.args.max_steps
        )

Julien Chaumond's avatar
Julien Chaumond committed
832
        logger.info("***** Running training *****")
833
834
835
836
837
838
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Num Epochs = {num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
        logger.info(f"  Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps}")
Julien Chaumond's avatar
Julien Chaumond committed
839

840
        self.state.epoch = 0
841
        start_time = time.time()
Julien Chaumond's avatar
Julien Chaumond committed
842
843
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
844

Julien Chaumond's avatar
Julien Chaumond committed
845
        # Check if continuing training from a checkpoint
846
847
848
849
        if resume_from_checkpoint is not None and os.path.isfile(
            os.path.join(resume_from_checkpoint, "trainer_state.json")
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
850
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
851
852
853
854
855
            if not self.args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0
856
857

            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
858
859
860
861
862
863
864
            logger.info(f"  Continuing training from epoch {epochs_trained}")
            logger.info(f"  Continuing training from global step {self.state.global_step}")
            if not self.args.ignore_data_skip:
                logger.info(
                    f"  Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                    "batches in the first epoch."
                )
865

Sylvain Gugger's avatar
Sylvain Gugger committed
866
867
868
869
870
        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
871
872
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        self.state.trial_params = hp_params(trial) if trial is not None else None
873
874
875
876
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
Sylvain Gugger's avatar
Sylvain Gugger committed
877
878
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()
Julien Chaumond's avatar
Julien Chaumond committed
879

880
        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
881
        tr_loss = torch.tensor(0.0).to(self.args.device)
882
883
        # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
884
        self._globalstep_last_logged = self.state.global_step
885
        self._total_flos = self.state.total_flos
Julien Chaumond's avatar
Julien Chaumond committed
886
        model.zero_grad()
Sylvain Gugger's avatar
Sylvain Gugger committed
887
888
889

        self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)

890
891
892
893
894
895
896
        # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
        if not self.args.ignore_data_skip:
            for epoch in range(epochs_trained):
                # We just need to begin an iteration to create the randomization of the sampler.
                for _ in train_dataloader:
                    break

897
        for epoch in range(epochs_trained, num_train_epochs):
898
899
900
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)

901
            if is_torch_tpu_available():
902
903
904
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                    self.args.device
                )
905
                epoch_iterator = parallel_loader
906
            else:
907
                epoch_iterator = train_dataloader
908

909
910
911
912
            # Reset the past mems state at the beginning of each epoch if necessary.
            if self.args.past_index >= 0:
                self._past = None

913
            steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
Sylvain Gugger's avatar
Sylvain Gugger committed
914
915
            self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)

Julien Chaumond's avatar
Julien Chaumond committed
916
917
918
919
920
921
922
            for step, inputs in enumerate(epoch_iterator):

                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue

Sylvain Gugger's avatar
Sylvain Gugger committed
923
924
925
                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)

926
927
                if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
928
929
930
931
                    with model.no_sync():
                        tr_loss += self.training_step(model, inputs)
                else:
                    tr_loss += self.training_step(model, inputs)
932
                self._total_flos += self.floating_point_ops(inputs)
Julien Chaumond's avatar
Julien Chaumond committed
933
934
935

                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
936
937
                    steps_in_epoch <= self.args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
Julien Chaumond's avatar
Julien Chaumond committed
938
                ):
939
                    # Gradient clipping
940
941
942
                    if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
                        # deepspeed does its own clipping

943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
                        if self.use_amp:
                            # AMP: gradients need unscaling
                            self.scaler.unscale_(self.optimizer)

                        if hasattr(self.optimizer, "clip_grad_norm"):
                            # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                            self.optimizer.clip_grad_norm(self.args.max_grad_norm)
                        else:
                            # Revert to normal clipping otherwise, handling Apex or full precision
                            torch.nn.utils.clip_grad_norm_(
                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                                self.args.max_grad_norm,
                            )

                    # Optimizer step
Stas Bekman's avatar
Stas Bekman committed
958
959
960
                    if self.deepspeed:
                        self.deepspeed.step()
                    elif is_torch_tpu_available():
961
                        xm.optimizer_step(self.optimizer)
962
                    elif self.use_amp:
963
                        self.scaler.step(self.optimizer)
964
                        self.scaler.update()
Lysandre Debut's avatar
Lysandre Debut committed
965
                    else:
966
                        self.optimizer.step()
Lysandre Debut's avatar
Lysandre Debut committed
967

968
                    self.lr_scheduler.step()
Julien Chaumond's avatar
Julien Chaumond committed
969
                    model.zero_grad()
970
                    self.state.global_step += 1
971
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
Sylvain Gugger's avatar
Sylvain Gugger committed
972
973
                    self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)

974
                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
Julien Chaumond's avatar
Julien Chaumond committed
975

Sylvain Gugger's avatar
Sylvain Gugger committed
976
                if self.control.should_epoch_stop or self.control.should_training_stop:
Julien Chaumond's avatar
Julien Chaumond committed
977
                    break
978

Sylvain Gugger's avatar
Sylvain Gugger committed
979
            self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
980
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
981

982
            if self.args.tpu_metrics_debug or self.args.debug:
983
984
985
986
987
988
989
990
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
Sylvain Gugger's avatar
Sylvain Gugger committed
991
            if self.control.should_training_stop:
992
                break
Julien Chaumond's avatar
Julien Chaumond committed
993

994
995
996
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")
Julien Chaumond's avatar
Julien Chaumond committed
997
998

        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
999
1000
1001
1002
        if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
1003
1004
            if isinstance(self.model, PreTrainedModel):
                self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
1005
                if not self.is_model_parallel:
1006
                    self.model = self.model.to(self.args.device)
1007
1008
1009
1010
            else:
                state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
                self.model.load_state_dict(state_dict)

1011
1012
1013
1014
1015
            if self.deepspeed:
                self.deepspeed.load_checkpoint(
                    self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
                )

1016
        metrics = speed_metrics("train", start_time, self.state.max_steps)
1017
1018
        if self._total_flos is not None:
            self.store_flos()
1019
1020
            metrics["total_flos"] = self.state.total_flos
        self.log(metrics)
1021

Sylvain Gugger's avatar
Sylvain Gugger committed
1022
        self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
1023
1024
        # add remaining tr_loss
        self._total_loss_scalar += tr_loss.item()
Sylvain Gugger's avatar
Sylvain Gugger committed
1025

1026
        return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
1027

1028
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
Sylvain Gugger's avatar
Sylvain Gugger committed
1029
1030
1031
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = tr_loss.item()
1032
1033
1034
            # reset tr_loss to zero
            tr_loss -= tr_loss

1035
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
Sylvain Gugger's avatar
Sylvain Gugger committed
1036
1037
1038
1039
1040
1041
            # backward compatibility for pytorch schedulers
            logs["learning_rate"] = (
                self.lr_scheduler.get_last_lr()[0]
                if version.parse(torch.__version__) >= version.parse("1.4")
                else self.lr_scheduler.get_lr()[0]
            )
1042
            self._total_loss_scalar += tr_loss_scalar
1043
            self._globalstep_last_logged = self.state.global_step
Sylvain Gugger's avatar
Sylvain Gugger committed
1044
1045
1046
1047
1048
1049
1050

            self.log(logs)

        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate()
            self._report_to_hp_search(trial, epoch, metrics)
1051

Sylvain Gugger's avatar
Sylvain Gugger committed
1052
1053
1054
1055
1056
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def _save_checkpoint(self, model, trial, metrics=None):
1057
1058
1059
1060
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save.
        assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"

1061
        # Save model checkpoint
1062
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
1063

1064
        if self.hp_search_backend is not None and trial is not None:
1065
1066
1067
1068
1069
1070
            if self.hp_search_backend == HPSearchBackend.OPTUNA:
                run_id = trial.number
            else:
                from ray import tune

                run_id = tune.get_trial_id()
1071
1072
1073
1074
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
        else:
            output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
1075

1076
            self.store_flos()
1077

1078
        self.save_model(output_dir)
1079
1080
        if self.deepspeed:
            self.deepspeed.save_checkpoint(output_dir)
1081
1082

        # Save optimizer and scheduler
1083
1084
        if self.sharded_dpp:
            self.optimizer.consolidate_state_dict()
1085

1086
1087
1088
1089
1090
1091
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
1092
1093
        elif self.is_world_process_zero() and not self.deepspeed:
            # deepspeed.save_checkpoint above saves model/optim/sched
1094
1095
1096
1097
1098
1099
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)

        # Determine the new best metric / best model checkpoint
Sylvain Gugger's avatar
Sylvain Gugger committed
1100
        if metrics is not None and self.args.metric_for_best_model is not None:
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir

        # Save the Trainer state
        if self.is_world_process_zero():
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

        # Maybe delete some older checkpoints.
        if self.is_world_process_zero():
            self._rotate_checkpoints(use_mtime=True)

1123
    def _load_optimizer_and_scheduler(self, checkpoint):
Sylvain Gugger's avatar
Sylvain Gugger committed
1124
        """If optimizer and scheduler states exist, load them."""
1125
        if checkpoint is None:
1126
1127
            return

1128
1129
        if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
            os.path.join(checkpoint, "scheduler.pt")
Sylvain Gugger's avatar
Sylvain Gugger committed
1130
1131
1132
1133
        ):
            # Load in optimizer and scheduler states
            if is_torch_tpu_available():
                # On TPU we have to take some extra precautions to properly load the states on the right device.
1134
                optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
Sylvain Gugger's avatar
Sylvain Gugger committed
1135
                with warnings.catch_warnings(record=True) as caught_warnings:
1136
                    lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
Sylvain Gugger's avatar
Sylvain Gugger committed
1137
1138
1139
1140
1141
1142
1143
1144
1145
                reissue_pt_warnings(caught_warnings)

                xm.send_cpu_data_to_device(optimizer_state, self.args.device)
                xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

                self.optimizer.load_state_dict(optimizer_state)
                self.lr_scheduler.load_state_dict(lr_scheduler_state)
            else:
                self.optimizer.load_state_dict(
1146
                    torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
Sylvain Gugger's avatar
Sylvain Gugger committed
1147
1148
                )
                with warnings.catch_warnings(record=True) as caught_warnings:
1149
                    self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
Sylvain Gugger's avatar
Sylvain Gugger committed
1150
1151
                reissue_pt_warnings(caught_warnings)

1152
1153
        if self.deepspeed:
            # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint this is sort of a check-n-load function
1154
            self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
1155

1156
1157
1158
1159
1160
1161
1162
    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
        compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: str = "minimize",
        backend: Optional[Union["str", HPSearchBackend]] = None,
1163
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
1164
        **kwargs,
1165
1166
    ) -> BestRun:
        """
1167
        Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
Sylvain Gugger's avatar
Sylvain Gugger committed
1168
1169
        :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
        provided, the sum of all metrics otherwise.
1170

Sylvain Gugger's avatar
Sylvain Gugger committed
1171
1172
1173
1174
1175
1176
1177
        .. warning::

            To use this method, you need to have provided a ``model_init`` when initializing your
            :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
            with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
            method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.

1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
        Args:
            hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
                A function that defines the hyperparameter search space. Will default to
                :func:`~transformers.trainer_utils.default_hp_space_optuna` or
                :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
            compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
                A function computing the objective to minimize or maximize from the metrics returned by the
                :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 100):
                The number of trial runs to test.
            direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
                pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
                several metrics.
            backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
                one is installed. If both are installed, will default to optuna.
            kwargs:
                Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
                more information see:

Sylvain Gugger's avatar
Sylvain Gugger committed
1199
1200
1201
1202
                - the documentation of `optuna.create_study
                  <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
                - the documentation of `tune.run
                  <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
1203
1204

        Returns:
Tiger's avatar
Tiger committed
1205
            :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
        """
        if backend is None:
            backend = default_hp_search_backend()
            if backend is None:
                raise RuntimeError(
                    "At least one of optuna or ray should be installed. "
                    "To install optuna run `pip install optuna`."
                    "To install ray run `pip install ray[tune]`."
                )
        backend = HPSearchBackend(backend)
        if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
Sylvain Gugger's avatar
Sylvain Gugger committed
1217
            raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
1218
        if backend == HPSearchBackend.RAY and not is_ray_tune_available():
1219
            raise RuntimeError(
Sylvain Gugger's avatar
Sylvain Gugger committed
1220
                "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
1221
1222
            )
        self.hp_search_backend = backend
Sylvain Gugger's avatar
Sylvain Gugger committed
1223
1224
1225
1226
1227
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )

1228
        self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
1229
        self.hp_name = hp_name
1230
1231
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective

1232
1233
        run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
        best_run = run_hp_search(self, n_trials, direction, **kwargs)
1234
1235
1236
1237

        self.hp_search_backend = None
        return best_run

Sylvain Gugger's avatar
Sylvain Gugger committed
1238
    def log(self, logs: Dict[str, float]) -> None:
1239
1240
1241
1242
1243
1244
1245
1246
1247
        """
        Log :obj:`logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (:obj:`Dict[str, float]`):
                The values to log.
        """
1248
        if self.state.epoch is not None:
1249
            logs["epoch"] = round(self.state.epoch, 2)
1250

1251
1252
        output = {**logs, **{"step": self.state.global_step}}
        self.state.log_history.append(output)
1253
        self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
Julien Chaumond's avatar
Julien Chaumond committed
1254

sgugger's avatar
Fix CI  
sgugger committed
1255
    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
1256
1257
1258
1259
        """
        Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        """
Julien Chaumond's avatar
Julien Chaumond committed
1260
        for k, v in inputs.items():
1261
1262
            if isinstance(v, torch.Tensor):
                inputs[k] = v.to(self.args.device)
Julien Chaumond's avatar
Julien Chaumond committed
1263

1264
1265
        if self.args.past_index >= 0 and self._past is not None:
            inputs["mems"] = self._past
1266

1267
1268
        return inputs

1269
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
1270
        """
1271
        Perform a training step on a batch of inputs.
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
1285
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
1286
1287
1288
        """

        model.train()
1289
        inputs = self._prepare_inputs(inputs)
1290

1291
        if self.use_amp:
1292
            with autocast():
Sylvain Gugger's avatar
Sylvain Gugger committed
1293
                loss = self.compute_loss(model, inputs)
1294
        else:
Sylvain Gugger's avatar
Sylvain Gugger committed
1295
            loss = self.compute_loss(model, inputs)
1296

Julien Chaumond's avatar
Julien Chaumond committed
1297
1298
        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
1299

Julien Chaumond's avatar
Julien Chaumond committed
1300
1301
1302
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

1303
        if self.use_amp:
1304
            self.scaler.scale(loss).backward()
1305
        elif self.use_apex:
1306
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
Julien Chaumond's avatar
Julien Chaumond committed
1307
                scaled_loss.backward()
1308
        elif self.deepspeed:
1309
            self.deepspeed.backward(loss)
Julien Chaumond's avatar
Julien Chaumond committed
1310
1311
1312
        else:
            loss.backward()

1313
        return loss.detach()
Julien Chaumond's avatar
Julien Chaumond committed
1314

Sylvain Gugger's avatar
Sylvain Gugger committed
1315
1316
1317
1318
1319
1320
    def compute_loss(self, model, inputs):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
        """
1321
1322
1323
1324
        if self.label_smoother is not None and "labels" in inputs:
            labels = inputs.pop("labels")
        else:
            labels = None
Sylvain Gugger's avatar
Sylvain Gugger committed
1325
1326
        outputs = model(**inputs)
        # Save past state if it exists
1327
        # TODO: this needs to be fixed and made cleaner later.
Sylvain Gugger's avatar
Sylvain Gugger committed
1328
1329
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]
Sylvain Gugger's avatar
Sylvain Gugger committed
1330

1331
1332
        if labels is not None:
            return self.label_smoother(outputs, labels)
Sylvain Gugger's avatar
Sylvain Gugger committed
1333
1334
1335
        else:
            # We don't use .loss here since the model may return tuples instead of ModelOutput.
            return outputs["loss"] if isinstance(outputs, dict) else outputs[0]
Sylvain Gugger's avatar
Sylvain Gugger committed
1336

1337
1338
    def is_local_process_zero(self) -> bool:
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
1339
1340
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
        machines) main process.
1341
        """
1342
        if is_torch_tpu_available():
Lysandre Debut's avatar
Lysandre Debut committed
1343
1344
1345
1346
            return xm.is_master_ordinal(local=True)
        else:
            return self.args.local_rank in [-1, 0]

1347
1348
    def is_world_process_zero(self) -> bool:
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
1349
1350
        Whether or not this process is the global main process (when training in a distributed fashion on several
        machines, this is only going to be :obj:`True` for one process).
Julien Chaumond's avatar
Julien Chaumond committed
1351
        """
1352
        if is_torch_tpu_available():
Lysandre Debut's avatar
Lysandre Debut committed
1353
1354
            return xm.is_master_ordinal(local=False)
        else:
Sylvain Gugger's avatar
Sylvain Gugger committed
1355
            return self.args.local_rank == -1 or dist.get_rank() == 0
Julien Chaumond's avatar
Julien Chaumond committed
1356
1357
1358

    def save_model(self, output_dir: Optional[str] = None):
        """
1359
        Will save the model, so you can reload it using :obj:`from_pretrained()`.
Julien Chaumond's avatar
Julien Chaumond committed
1360

1361
        Will only save from the world_master process (unless in TPUs).
Julien Chaumond's avatar
Julien Chaumond committed
1362
        """
1363

1364
        if is_torch_tpu_available():
1365
            self._save_tpu(output_dir)
1366
        elif self.is_world_process_zero():
Julien Chaumond's avatar
Julien Chaumond committed
1367
1368
            self._save(output_dir)

1369
1370
1371
1372
1373
        # If on sagemaker and we are saving the main model (not a checkpoint so output_dir=None), save a copy to
        # SM_MODEL_DIR for easy deployment.
        if output_dir is None and os.getenv("SM_MODEL_DIR") is not None:
            self.save_model(output_dir=os.getenv("SM_MODEL_DIR"))

1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
    def _save_tpu(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)

        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
1385
1386
1387
1388
1389
1390
        if not isinstance(self.model, PreTrainedModel):
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
Sylvain Gugger's avatar
Sylvain Gugger committed
1391
        if self.tokenizer is not None and self.is_world_process_zero():
1392
            self.tokenizer.save_pretrained(output_dir)
1393

Julien Chaumond's avatar
Julien Chaumond committed
1394
1395
1396
1397
1398
1399
1400
    def _save(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info("Saving model checkpoint to %s", output_dir)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
1401
1402
1403
1404
1405
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
Sylvain Gugger's avatar
Sylvain Gugger committed
1406
        if self.tokenizer is not None and self.is_world_process_zero():
1407
            self.tokenizer.save_pretrained(output_dir)
Julien Chaumond's avatar
Julien Chaumond committed
1408
1409
1410

        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
1411

1412
    def store_flos(self):
1413
        # Storing the number of floating-point operations that went into the model
1414
        if self._total_flos is not None:
1415
            if self.args.local_rank != -1:
1416
                self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
1417
            else:
1418
                self.state.total_flos = self._total_flos
Julien Chaumond's avatar
Julien Chaumond committed
1419
1420
1421
1422

    def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
        ordering_and_checkpoint_path = []

1423
        glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
Julien Chaumond's avatar
Julien Chaumond committed
1424
1425
1426
1427
1428

        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
1429
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
Julien Chaumond's avatar
Julien Chaumond committed
1430
1431
1432
1433
1434
                if regex_match and regex_match.groups():
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))

        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
1435
1436
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
1437
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
1438
            checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
1439
1440
1441
                checkpoints_sorted[-1],
                checkpoints_sorted[best_model_index],
            )
Julien Chaumond's avatar
Julien Chaumond committed
1442
1443
1444
        return checkpoints_sorted

    def _rotate_checkpoints(self, use_mtime=False) -> None:
1445
        if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
Julien Chaumond's avatar
Julien Chaumond committed
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
            return

        # Check if we should delete older checkpoint(s)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
        if len(checkpoints_sorted) <= self.args.save_total_limit:
            return

        number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
        checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
        for checkpoint in checkpoints_to_be_deleted:
            logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
            shutil.rmtree(checkpoint)

1459
    def evaluate(
1460
1461
1462
1463
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
1464
    ) -> Dict[str, float]:
Julien Chaumond's avatar
Julien Chaumond committed
1465
        """
1466
        Run evaluation and returns metrics.
Julien Chaumond's avatar
Julien Chaumond committed
1467

Sylvain Gugger's avatar
Sylvain Gugger committed
1468
1469
        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init :obj:`compute_metrics` argument).
Julien Chaumond's avatar
Julien Chaumond committed
1470

1471
1472
        You can also subclass and override this method to inject custom behavior.

Julien Chaumond's avatar
Julien Chaumond committed
1473
        Args:
1474
            eval_dataset (:obj:`Dataset`, `optional`):
1475
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
Sylvain Gugger's avatar
Sylvain Gugger committed
1476
1477
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
                :obj:`__len__` method.
1478
1479
1480
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
1481
1482
1483
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)
1484

Julien Chaumond's avatar
Julien Chaumond committed
1485
        Returns:
1486
1487
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
Julien Chaumond's avatar
Julien Chaumond committed
1488
        """
1489
1490
1491
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")

Julien Chaumond's avatar
Julien Chaumond committed
1492
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
1493
        start_time = time.time()
Julien Chaumond's avatar
Julien Chaumond committed
1494

1495
1496
1497
1498
1499
1500
        output = self.prediction_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
1501
            ignore_keys=ignore_keys,
1502
            metric_key_prefix=metric_key_prefix,
1503
        )
Lysandre Debut's avatar
Lysandre Debut committed
1504

1505
1506
        n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
        output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
1507
        self.log(output.metrics)
1508

1509
        if self.args.tpu_metrics_debug or self.args.debug:
Lysandre Debut's avatar
Lysandre Debut committed
1510
1511
1512
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

Sylvain Gugger's avatar
Sylvain Gugger committed
1513
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
Julien Chaumond's avatar
Julien Chaumond committed
1514
1515
        return output.metrics

1516
1517
1518
    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
    ) -> PredictionOutput:
Julien Chaumond's avatar
Julien Chaumond committed
1519
        """
1520
        Run prediction and returns predictions and potential metrics.
Julien Chaumond's avatar
Julien Chaumond committed
1521

Sylvain Gugger's avatar
Sylvain Gugger committed
1522
1523
        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.
1524
1525
1526

        Args:
            test_dataset (:obj:`Dataset`):
1527
                Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
1528
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
1529
1530
1531
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
1532
1533
1534
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)
1535

1536
1537
1538
1539
1540
1541
        .. note::

            If your predictions or labels have different sequence length (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.

Sylvain Gugger's avatar
Sylvain Gugger committed
1542
1543
1544
1545
1546
1547
        Returns: `NamedTuple` A namedtuple with the following keys:

            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
Julien Chaumond's avatar
Julien Chaumond committed
1548
        """
1549
1550
1551
        if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")

Julien Chaumond's avatar
Julien Chaumond committed
1552
        test_dataloader = self.get_test_dataloader(test_dataset)
1553
        start_time = time.time()
1554

1555
        output = self.prediction_loop(
1556
1557
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
1558
1559
        output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
        return output
Julien Chaumond's avatar
Julien Chaumond committed
1560

1561
    def prediction_loop(
1562
1563
1564
1565
1566
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
1567
        metric_key_prefix: str = "eval",
Julien Chaumond's avatar
Julien Chaumond committed
1568
1569
    ) -> PredictionOutput:
        """
1570
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Julien Chaumond's avatar
Julien Chaumond committed
1571
1572
1573

        Works both with or without labels.
        """
1574
1575
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
1576
1577
1578
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
Julien Chaumond's avatar
Julien Chaumond committed
1579

1580
        model = self.model
Julien Chaumond's avatar
Julien Chaumond committed
1581
        # multi-gpu eval
1582
        if self.args.n_gpu > 1:
1583
1584
1585
            model = torch.nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
Julien Chaumond's avatar
Julien Chaumond committed
1586

1587
        batch_size = dataloader.batch_size
1588
        num_examples = self.num_examples(dataloader)
Julien Chaumond's avatar
Julien Chaumond committed
1589
        logger.info("***** Running %s *****", description)
1590
        logger.info("  Num examples = %d", num_examples)
1591
        logger.info("  Batch size = %d", batch_size)
1592
1593
1594
1595
1596
1597
1598
1599
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None

        world_size = 1
        if is_torch_tpu_available():
            world_size = xm.xrt_world_size()
        elif self.args.local_rank != -1:
Sylvain Gugger's avatar
Sylvain Gugger committed
1600
            world_size = dist.get_world_size()
1601
1602
1603
        world_size = max(1, world_size)

        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
1604
1605
1606
        if not prediction_loss_only:
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
1607

Julien Chaumond's avatar
Julien Chaumond committed
1608
1609
        model.eval()

1610
        if is_torch_tpu_available():
1611
1612
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)

1613
        if self.args.past_index >= 0:
1614
            self._past = None
1615

Sylvain Gugger's avatar
Sylvain Gugger committed
1616
1617
        self.callback_handler.eval_dataloader = dataloader

1618
        for step, inputs in enumerate(dataloader):
1619
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
1620
            if loss is not None:
1621
1622
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
1623
            if logits is not None:
1624
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
1625
            if labels is not None:
1626
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
Sylvain Gugger's avatar
Sylvain Gugger committed
1627
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
Julien Chaumond's avatar
Julien Chaumond committed
1628

1629
1630
1631
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
1632
1633
1634
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
1635
1636
1637
1638

                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None

1639
1640
1641
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
Julien Chaumond's avatar
Julien Chaumond committed
1642

1643
1644
        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
1645
1646
1647
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
1648
1649

        eval_loss = eval_losses_gatherer.finalize()
1650
1651
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
Lysandre Debut's avatar
Lysandre Debut committed
1652

Julien Chaumond's avatar
Julien Chaumond committed
1653
1654
1655
1656
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
1657
1658

        if eval_loss is not None:
1659
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
1660

1661
        # Prefix all keys with metric_key_prefix + '_'
1662
        for key in list(metrics.keys()):
1663
1664
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
Julien Chaumond's avatar
Julien Chaumond committed
1665
1666

        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
1667

1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
    def _gather_and_numpify(self, tensors, name):
        """
        Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
        concatenating them to `gathered`
        """
        if tensors is None:
            return
        if is_torch_tpu_available():
            tensors = nested_xla_mesh_reduce(tensors, name)
        elif self.args.local_rank != -1:
            tensors = distributed_concat(tensors)

        return nested_numpify(tensors)

1682
    def prediction_step(
1683
1684
1685
1686
1687
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
1704
1705
1706
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
1707
1708

        Return:
Sylvain Gugger's avatar
Sylvain Gugger committed
1709
1710
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
            labels (each being optional).
1711
        """
1712
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
1713
        inputs = self._prepare_inputs(inputs)
1714
1715
1716
1717
1718
        if ignore_keys is None:
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
1719
1720

        with torch.no_grad():
1721
            if self.use_amp:
luyug's avatar
luyug committed
1722
1723
1724
1725
                with autocast():
                    outputs = model(**inputs)
            else:
                outputs = model(**inputs)
1726
            if has_labels:
Sylvain Gugger's avatar
Sylvain Gugger committed
1727
1728
1729
1730
                if self.label_smoother is not None and "labels" in inputs:
                    loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
                else:
                    loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
1731
1732
1733
1734
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    logits = outputs[1:]
1735
1736
            else:
                loss = None
1737
1738
1739
1740
1741
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                else:
                    logits = outputs
            # TODO: this needs to be fixed and made cleaner later.
1742
1743
1744
1745
1746
1747
            if self.args.past_index >= 0:
                self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]

        if prediction_loss_only:
            return (loss, None, None)

1748
        logits = nested_detach(logits)
Sylvain Gugger's avatar
Sylvain Gugger committed
1749
1750
1751
1752
        if len(logits) == 1:
            logits = logits[0]

        if has_labels:
1753
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
Sylvain Gugger's avatar
Sylvain Gugger committed
1754
1755
1756
1757
1758
1759
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None

        return (loss, logits, labels)
1760
1761
1762

    def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
1763
1764
1765
        For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
        floating point operations for every backward + forward pass. If using another model, either implement such a
        method in the model or subclass and override this method.
1766
1767
1768
1769
1770
1771
1772
1773

        Args:
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            :obj:`int`: The number of floating-point operations.
        """
1774
1775
        if hasattr(self.model, "floating_point_ops"):
            return self.model.floating_point_ops(inputs)
1776
1777
        else:
            return 0