# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""

import collections
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

from tqdm.auto import tqdm


# Integrations must be imported before ML frameworks:
from .integrations import (  # isort: split
    default_hp_search_backend,
    get_reporting_integration_callbacks,
    hp_params,
    is_fairscale_available,
    is_optuna_available,
    is_ray_tune_available,
    run_hp_search_optuna,
    run_hp_search_ray,
)

import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from .dependency_versions_check import dep_version_check
from .file_utils import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    PushToHubMixin,
    is_apex_available,
    is_datasets_available,
    is_in_notebook,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_torch_tpu_available,
    is_training_run_on_sagemaker,
)
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
    CallbackHandler,
    DefaultFlowCallback,
    PrinterCallback,
    ProgressCallback,
    TrainerCallback,
    TrainerControl,
    TrainerState,
)
from .trainer_pt_utils import (
    DistributedLengthGroupedSampler,
    DistributedSamplerWithLoop,
    DistributedTensorGatherer,
    IterableDatasetShard,
    LabelSmoother,
    LengthGroupedSampler,
    SequentialDistributedSampler,
    ShardSampler,
    distributed_broadcast_scalars,
    distributed_concat,
    find_batch_size,
    get_parameter_names,
    nested_concat,
    nested_detach,
    nested_numpify,
    nested_truncate,
    nested_xla_mesh_reduce,
    reissue_pt_warnings,
)
from .trainer_utils import (
    PREFIX_CHECKPOINT_DIR,
    BestRun,
    EvalLoopOutput,
    EvalPrediction,
    HPSearchBackend,
    PredictionOutput,
    ShardedDDPOption,
    TrainerMemoryTracker,
    TrainOutput,
    default_compute_objective,
    default_hp_space,
    denumpify_detensorize,
    get_last_checkpoint,
    number_of_arguments,
    set_seed,
    speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES


_is_torch_generator_available = False
_is_native_amp_available = False

DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback

if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback

    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback

if is_apex_available():
    from apex import amp

if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_torch_generator_available = True
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

if is_datasets_available():
    import datasets

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl

if is_fairscale_available():
    dep_version_check("fairscale")
    import fairscale
    from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.nn.wrap import auto_wrap
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler

if is_sagemaker_dp_enabled():
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
    import torch.distributed as dist

if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp

    from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat

if is_training_run_on_sagemaker():
    logging.add_handler(StreamHandler(sys.stdout))


if TYPE_CHECKING:
    import optuna

logger = logging.get_logger(__name__)


class Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.

    Args:
        model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
            The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.

            .. note::

                :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
                provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
                they work the same way as the 🤗 Transformers models.
        args (:class:`~transformers.TrainingArguments`, `optional`):
            The arguments to tweak for training. Will default to a basic instance of
            :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
            the current directory if not provided.
        data_collator (:obj:`DataCollator`, `optional`):
            The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
            Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
            :func:`~transformers.DataCollatorWithPadding` otherwise.
        train_dataset (:obj:`torch.utils.data.dataset.Dataset` or :obj:`torch.utils.data.dataset.IterableDataset`, `optional`):
            The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed.

            Note that if it's a :obj:`torch.utils.data.dataset.IterableDataset` with some randomization and you are
            training in a distributed fashion, your iterable dataset should either use an internal attribute
            :obj:`generator` that is a :obj:`torch.Generator` for the randomization that must be identical on all
            processes (and the Trainer will manually set the seed of this :obj:`generator` at each epoch) or have a
            :obj:`set_epoch()` method that internally sets the seed of the RNGs used.
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed.
        tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
            interrupted training or reuse the fine-tuned model.
        model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
            A function that instantiates the model to be used. If provided, each call to
            :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.

            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
            able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
            layers, dropout probabilities etc.).
        compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
            The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping metric names (strings) to metric
            values.
        callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed :doc:`here <callback>`.

            If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
            containing the optimizer and the scheduler to use. Will default to an instance of
            :class:`~transformers.AdamW` on your model and a scheduler given by
            :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.

    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a
          :class:`~transformers.PreTrainedModel` subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
          the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
          inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
          to :obj:`False` if model parallel or deepspeed is used, or if the default
          ``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False`.
        - **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
          while in ``train``).
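
    Example (a minimal sketch, not the only supported workflow; ``my_model``, ``my_train_dataset`` and
    ``my_eval_dataset`` are placeholder objects assumed to be built already)::

        import numpy as np

        def compute_accuracy(eval_pred):
            # eval_pred is an EvalPrediction with .predictions and .label_ids
            preds = np.argmax(eval_pred.predictions, axis=-1)
            return {"accuracy": float((preds == eval_pred.label_ids).mean())}

        trainer = Trainer(
            model=my_model,
            args=TrainingArguments(output_dir="tmp_trainer"),
            train_dataset=my_train_dataset,
            eval_dataset=my_eval_dataset,
            compute_metrics=compute_accuracy,
        )
        trainer.train()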
    """

    from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state

    def __init__(
        self,
        model: Union[PreTrainedModel, nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        model_init: Callable[[], PreTrainedModel] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
    ):
        if args is None:
            output_dir = "tmp_trainer"
            logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
            args = TrainingArguments(output_dir=output_dir)
        self.args = args
        # Seed must be set before instantiating the model when using model_init.
        set_seed(self.args.seed)
        self.hp_name = None
        self.deepspeed = None
        self.is_in_train = False

        # memory metrics - must set up as early as possible
        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()

        # set the correct log level depending on the node
        log_level = args.get_process_log_level()
        logging.set_verbosity(log_level)

        # force device and distributed setup init explicitly
        args._setup_devices

        if model is None:
            if model_init is not None:
                self.model_init = model_init
                model = self.call_model_init()
            else:
                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
        else:
            if model_init is not None:
                warnings.warn(
                    "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                    "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                    FutureWarning,
                )
            self.model_init = model_init

        if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
            self.is_model_parallel = True
        else:
            self.is_model_parallel = False

        # Setup Sharded DDP training
        self.sharded_ddp = None
        if len(args.sharded_ddp) > 0:
            if args.deepspeed:
                raise ValueError(
                    "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
                )

            if args.local_rank == -1:
                raise ValueError("Using sharded DDP only works in distributed training.")
            elif not is_fairscale_available():
                raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
            elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
                raise ImportError(
                    "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
                    f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
                )
            elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.SIMPLE
            elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
            elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.ZERO_DP_3

        # one place to sort out whether to place the model on device or not
        # postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a much bigger than 1 gpu model
        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
        #    and we only use deepspeed for training at the moment
        # 3. full fp16 eval - since the model needs to be halved first
        # 4. Sharded DDP - same as MP
        self.place_model_on_device = args.place_model_on_device
        if (
            self.is_model_parallel
            or args.deepspeed
            or (args.fp16_full_eval and not args.do_train)
            or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
        ):
            self.place_model_on_device = False

        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.tokenizer = tokenizer

        if self.place_model_on_device:
            model = model.to(args.device)

        # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
        if self.is_model_parallel:
            self.args._n_gpu = 1

        # later use `self.model is self.model_wrapped` to check if it's wrapped or not
        self.model_wrapped = model
        self.model = model

        self.compute_metrics = compute_metrics
        self.optimizer, self.lr_scheduler = optimizers
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
        callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
        self.callback_handler = CallbackHandler(
            callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
        )
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)

        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False

        # Create clone of the remote repo and output directory if needed
        if self.args.push_to_hub:
            self.init_git_repo()
        if self.args.should_save:
            os.makedirs(self.args.output_dir, exist_ok=True)

        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")

        if args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")

        if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")

        self._signature_columns = None

        # Mixed precision setup
        self.use_apex = False
        self.use_amp = False
        self.fp16_backend = None

        if args.fp16:
            if args.fp16_backend == "auto":
                self.fp16_backend = "amp" if _is_native_amp_available else "apex"
            else:
                self.fp16_backend = args.fp16_backend
            logger.info(f"Using {self.fp16_backend} fp16 backend")

        if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
            if self.fp16_backend == "amp":
                self.use_amp = True
                if is_sagemaker_mp_enabled():
                    self.scaler = smp.amp.GradScaler()
                elif self.sharded_ddp is not None:
                    self.scaler = ShardedGradScaler()
                else:
                    self.scaler = torch.cuda.amp.GradScaler()
            else:
                if not is_apex_available():
                    raise ImportError(
                        "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                    )
                self.use_apex = True

        # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
        if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
            raise ValueError(
                "SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
                "along 'max_grad_norm': 0 in your hyperparameters."
            )

        # Label smoothing
        if self.args.label_smoothing_factor != 0:
            self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
        else:
            self.label_smoother = None

        self.state = TrainerState()
        self.control = TrainerControl()
        # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
        # returned to 0 every time flos need to be logged
        self.current_flos = 0
        self.hp_search_backend = None
        self.use_tune_checkpoints = False
        default_label_names = (
            ["start_positions", "end_positions"]
            if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
            else ["labels"]
        )
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)

        # very last
        self._memory_tracker.stop_and_update_metrics()

    def add_callback(self, callback):
        """
        Add a callback to the current list of :class:`~transformer.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will instantiate a member of that class.
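
        Example (a sketch; :class:`~transformers.EarlyStoppingCallback` is an existing callback in the library)::

            from transformers import EarlyStoppingCallback

            # assumes `load_best_model_at_end=True` and a `metric_for_best_model` were set in the TrainingArguments
            trainer.add_callback(EarlyStoppingCallback(early_stopping_patience=3))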
        """
        self.callback_handler.add_callback(callback)

    def pop_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.

        If the callback is not found, returns :obj:`None` (and no error is raised).

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            :class:`~transformer.TrainerCallback`: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of :class:`~transformer.TrainerCallback`.

        Args:
           callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
               A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
               In the first case, will remove the first member of that class found in the list of callbacks.
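
        Example (a sketch; removes the default :class:`~transformers.PrinterCallback` if it is in the list)::

            from transformers import PrinterCallback

            trainer.remove_callback(PrinterCallback)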
        """
        self.callback_handler.remove_callback(callback)
    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        if not self.args.remove_unused_columns:
            return dataset
        if self._signature_columns is None:
            # Inspect model forward signature to keep only the arguments it accepts.
            signature = inspect.signature(self.model.forward)
            self._signature_columns = list(signature.parameters.keys())
            # Labels may be named label or label_ids, the default data collator handles that.
            self._signature_columns += ["label", "label_ids"]
        columns = [k for k in self._signature_columns if k in dataset.column_names]
        ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
        if len(ignored_columns) > 0:
            dset_description = "" if description is None else f"in the {description} set "
            logger.info(
                f"The following columns {dset_description}don't have a corresponding argument in "
                f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
            )

        if version.parse(datasets.__version__) < version.parse("1.4.0"):
            dataset.set_format(
                type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
            )
            return dataset
        else:
            return dataset.remove_columns(ignored_columns)
    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        if not isinstance(self.train_dataset, collections.abc.Sized):
            return None

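        # Seed the sampler generator from the global torch RNG (itself seeded via `args.seed` in __init__),
        # so single-process shuffling is reproducible for a given seed.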
        generator = None
        if self.args.world_size <= 1 and _is_torch_generator_available:
            generator = torch.Generator()
            generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))

        # Build the sampler.
        if self.args.group_by_length:
            if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
                lengths = (
                    self.train_dataset[self.args.length_column_name]
                    if self.args.length_column_name in self.train_dataset.column_names
                    else None
                )
            else:
                lengths = None
            model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
            if self.args.world_size <= 1:
                return LengthGroupedSampler(
                    self.train_dataset,
                    self.args.train_batch_size,
                    lengths=lengths,
                    model_input_name=model_input_name,
                    generator=generator,
                )
            else:
                return DistributedLengthGroupedSampler(
                    self.train_dataset,
                    self.args.train_batch_size,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    lengths=lengths,
                    model_input_name=model_input_name,
                    seed=self.args.seed,
                )

        else:
            if self.args.world_size <= 1:
                if _is_torch_generator_available:
                    return RandomSampler(self.train_dataset, generator=generator)
                return RandomSampler(self.train_dataset)
            elif (
                self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
                and not self.args.dataloader_drop_last
            ):
                # Use a loop for TPUs when drop_last is False to have all batches have the same size.
                return DistributedSamplerWithLoop(
                    self.train_dataset,
                    batch_size=self.args.per_device_train_batch_size,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    seed=self.args.seed,
                )
            else:
                return DistributedSampler(
                    self.train_dataset,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    seed=self.args.seed,
                )

    def get_train_dataloader(self) -> DataLoader:
        """
        Returns the training :class:`~torch.utils.data.DataLoader`.

        Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
        to distributed training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
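
        Example of an override (a minimal sketch; ``NoShuffleTrainer`` is a hypothetical subclass)::

            class NoShuffleTrainer(Trainer):
                def get_train_dataloader(self) -> DataLoader:
                    # iterate the training set in order instead of shuffling
                    return DataLoader(
                        self.train_dataset,
                        sampler=SequentialSampler(self.train_dataset),
                        batch_size=self.args.train_batch_size,
                        collate_fn=self.data_collator,
                    )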
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")

        train_dataset = self.train_dataset
        if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
            train_dataset = self._remove_unused_columns(train_dataset, description="training")

        if isinstance(train_dataset, torch.utils.data.dataset.IterableDataset):
            if self.args.world_size > 1:
                train_dataset = IterableDatasetShard(
                    train_dataset,
                    batch_size=self.args.train_batch_size,
                    drop_last=self.args.dataloader_drop_last,
                    num_processes=self.args.world_size,
                    process_index=self.args.process_index,
                )

            return DataLoader(
                train_dataset,
                batch_size=self.args.train_batch_size,
                collate_fn=self.data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )

        train_sampler = self._get_train_sampler()

        return DataLoader(
            train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=train_sampler,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
        # Deprecated code
        if self.args.use_legacy_prediction_loop:
            if is_torch_tpu_available():
                return SequentialDistributedSampler(
                    eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
                )
            elif is_sagemaker_mp_enabled():
                return SequentialDistributedSampler(
                    eval_dataset,
                    num_replicas=smp.dp_size(),
                    rank=smp.dp_rank(),
                    batch_size=self.args.per_device_eval_batch_size,
                )
            elif self.args.local_rank != -1:
                return SequentialDistributedSampler(eval_dataset)
            else:
                return SequentialSampler(eval_dataset)

        if self.args.world_size <= 1:
            return SequentialSampler(eval_dataset)
        else:
            return ShardSampler(
                eval_dataset,
                batch_size=self.args.per_device_eval_batch_size,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )

    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
        """
        Returns the evaluation :class:`~torch.utils.data.DataLoader`.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
                If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
                accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset

        if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
            eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")

        if isinstance(eval_dataset, torch.utils.data.dataset.IterableDataset):
            if self.args.world_size > 1:
                eval_dataset = IterableDatasetShard(
                    eval_dataset,
                    batch_size=self.args.eval_batch_size,
                    drop_last=self.args.dataloader_drop_last,
                    num_processes=self.args.world_size,
                    process_index=self.args.process_index,
                )
            return DataLoader(
                eval_dataset,
                batch_size=self.args.eval_batch_size,
                collate_fn=self.data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )

        eval_sampler = self._get_eval_sampler(eval_dataset)

        return DataLoader(
            eval_dataset,
            sampler=eval_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """
        Returns the test :class:`~torch.utils.data.DataLoader`.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
                The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
        """
        if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
            test_dataset = self._remove_unused_columns(test_dataset, description="test")

        if isinstance(test_dataset, torch.utils.data.dataset.IterableDataset):
            if self.args.world_size > 1:
                test_dataset = IterableDatasetShard(
                    test_dataset,
                    batch_size=self.args.eval_batch_size,
                    drop_last=self.args.dataloader_drop_last,
                    num_processes=self.args.world_size,
                    process_index=self.args.process_index,
                )
            return DataLoader(
                test_dataset,
                batch_size=self.args.eval_batch_size,
                collate_fn=self.data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )

        test_sampler = self._get_eval_sampler(test_dataset)

        # We use the same batch_size as for eval.
        return DataLoader(
            test_dataset,
            sampler=test_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            pin_memory=self.args.dataloader_pin_memory,
        )

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or override this method (or :obj:`create_optimizer` and/or
        :obj:`create_scheduler`) in a subclass.
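
        Example of passing custom optimizers at init instead (a minimal sketch; assumes ``model`` and
        ``training_args`` already exist)::

            from torch.optim import SGD
            from torch.optim.lr_scheduler import LambdaLR

            optimizer = SGD(model.parameters(), lr=1e-3)
            scheduler = LambdaLR(optimizer, lr_lambda=lambda step: 1.0)  # constant schedule
            trainer = Trainer(model=model, args=training_args, optimizers=(optimizer, scheduler))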
        """
        self.create_optimizer()
        self.create_scheduler(num_training_steps)

    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or override this method in a subclass.
        """
        if self.optimizer is None:
            decay_parameters = get_parameter_names(self.model, [nn.LayerNorm])
            decay_parameters = [name for name in decay_parameters if "bias" not in name]
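            # Apply weight decay to all parameters except biases and LayerNorm weights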
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(self.optimizer)

    def create_scheduler(self, num_training_steps: int):
        """
        Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.

        Args:
            num_training_steps (int): The number of training steps to do.
        """
        if self.lr_scheduler is None:
            self.lr_scheduler = get_scheduler(
                self.args.lr_scheduler_type,
                self.optimizer,
                num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
                num_training_steps=num_training_steps,
            )

    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.

        Will raise an exception if the underlying dataset does not implement :obj:`__len__`.
        """
        return len(dataloader.dataset)

    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """HP search setup code"""
        self._trial = trial

        if self.hp_search_backend is None or trial is None:
            return
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            params = self.hp_space(trial)
        elif self.hp_search_backend == HPSearchBackend.RAY:
            params = trial
            params.pop("wandb", None)
        for key, value in params.items():
            if not hasattr(self.args, key):
                logger.warning(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
                )
                continue
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)
            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info(f"Trial: {trial.params}")
        if self.args.deepspeed:
            # Rebuild the deepspeed config to reflect the updated training parameters
            from transformers.deepspeed import HfDeepSpeedConfig

            self.args.hf_deepspeed_config = HfDeepSpeedConfig(self.args)

    def _report_to_hp_search(
        self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
    ):
        if self.hp_search_backend is None or trial is None:
            return
        self.objective = self.compute_objective(metrics.copy())
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            import optuna

            trial.report(self.objective, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
            from ray import tune

            if self.control.should_save:
                self._tune_save_checkpoint()
            tune.report(objective=self.objective, **metrics)

    def _tune_save_checkpoint(self):
        from ray import tune

        if not self.use_tune_checkpoints:
            return
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
            output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
            self.save_model(output_dir)
            if self.args.should_save:
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))

    def call_model_init(self, trial=None):
        model_init_argcount = number_of_arguments(self.model_init)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")

        return model

    def _wrap_model(self, model, training=True):
        if is_sagemaker_mp_enabled():
            # Wrapping the base model twice in a DistributedModel will raise an error.
            if isinstance(self.model_wrapped, smp.model.DistributedModel):
                return self.model_wrapped
            return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)

        # Deepspeed has already initialized its own DDP and AMP
        if self.deepspeed:
            return self.deepspeed

        # train/eval could be run multiple times - if already wrapped, don't re-wrap it
        if unwrap_model(model) is not model:
            return model

        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex and training:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)

        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = nn.DataParallel(model)

        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        if not training:
            return model

        # Distributed training (should be after apex fp16 initialization)
        if self.sharded_ddp is not None:
            # Sharded DDP!
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                model = ShardedDDP(model, self.optimizer)
            else:
                mixed_precision = self.args.fp16
                cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
                zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
                # XXX: Breaking the self.model convention but I see no way around it for now.
                if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
                    model = auto_wrap(model)
                self.model = model = FullyShardedDDP(
                    model,
                    mixed_precision=mixed_precision,
                    reshard_after_forward=zero_3,
                    cpu_offload=cpu_offload,
                ).to(self.args.device)

        elif is_sagemaker_dp_enabled():
            model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
        elif self.args.local_rank != -1:
            if self.args.ddp_find_unused_parameters is not None:
                find_unused_parameters = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
            else:
                find_unused_parameters = True
            model = nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=find_unused_parameters,
            )

        return model

    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union["optuna.Trial", Dict[str, Any]] = None,
        ignore_keys_for_eval: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        Main training entry point.

        Args:
            resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
                If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
                :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
                `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
                training will resume from the model/optimizer/scheduler states loaded here.
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs:
                Additional keyword arguments used to hide deprecated arguments.
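
        Example (a minimal sketch; assumes checkpoints were saved to ``args.output_dir`` by a previous run)::

            trainer.train(resume_from_checkpoint=True)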
        """
        resume_from_checkpoint = None if not resume_from_checkpoint else resume_from_checkpoint

        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        args = self.args

        self.is_in_train = True

        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if args.fp16_full_eval and not args.do_train:
            self.model = self.model.to(args.device)

        if "model_path" in kwargs:
            resume_from_checkpoint = kwargs.pop("model_path")
            warnings.warn(
                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
                "instead.",
                FutureWarning,
            )
        if len(kwargs) > 0:
            raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)

        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None

        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")

        if resume_from_checkpoint is not None:
            if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
                raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")

            logger.info(f"Loading model from {resume_from_checkpoint}.")

1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
            if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
                config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
                checkpoint_version = config.transformers_version
                if checkpoint_version is not None and checkpoint_version != __version__:
                    logger.warn(
                        f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                        f"Transformers but your current version is {__version__}. This is not recommended and could "
                        "yield to errors or unwanted behaviors."
                    )

1066
            if args.deepspeed:
1067
                # will be resumed in deepspeed_init
1068
                pass
1069
            else:
1070
1071
1072
                # We load the model state dict on the CPU to avoid an OOM error.
                state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
                # If the model is on the GPU, it still works!
1073
                self._load_state_dict_in_model(state_dict)
1074

1075
1076
1077
                # release memory
                del state_dict

        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self.model = self.model.to(args.device)
            self.model_wrapped = self.model

        # Keeping track of whether we can call len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)

        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()

        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if args.max_steps > 0:
                max_steps = args.max_steps
                num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                    args.max_steps % num_update_steps_per_epoch > 0
                )
                # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
                # the best we can do.
                num_train_samples = args.max_steps * total_train_batch_size
            else:
                max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(args.num_train_epochs)
                num_train_samples = len(self.train_dataset) * args.num_train_epochs
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = args.max_steps
            # Setting a very large number of epochs so we go as many times as necessary over the iterator.
            num_train_epochs = sys.maxsize
            num_update_steps_per_epoch = max_steps
            num_train_samples = args.max_steps * total_train_batch_size

        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model, creating new variables and module
                # references registered here no longer work on other gpus, breaking the module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP (torch.distributed.launch)."
                )
            else:
                debug_overflow = DebugUnderflowOverflow(self.model)  # noqa

        delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
        if args.deepspeed:
            deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
                self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
            )
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            self.optimizer = optimizer
            self.lr_scheduler = lr_scheduler
        elif not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None

        model = self._wrap_model(self.model_wrapped)

        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model

        if delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(resume_from_checkpoint)

        # important: at this point:
        # self.model         is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.

        # Train!
        num_examples = (
            self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
        )

        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Num Epochs = {num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
        logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps}")

        self.state.epoch = 0
        start_time = time.time()
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        steps_trained_progress_bar = None

        # Check if continuing training from a checkpoint
        if resume_from_checkpoint is not None and os.path.isfile(
            os.path.join(resume_from_checkpoint, "trainer_state.json")
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            if not args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0

            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info(f"  Continuing training from epoch {epochs_trained}")
            logger.info(f"  Continuing training from global step {self.state.global_step}")
            if not args.ignore_data_skip:
                logger.info(
                    f"  Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                    "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
                    "flag to your launch command, but you will resume the training on data already seen by your model."
                )
                if self.is_local_process_zero() and not args.disable_tqdm:
                    steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
                    steps_trained_progress_bar.set_description("Skipping the first batches")

        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        self.state.trial_params = hp_params(trial) if trial is not None else None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()

        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0).to(args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()

        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

        # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
        if not args.ignore_data_skip:
            for epoch in range(epochs_trained):
                # We just need to begin an iteration to create the randomization of the sampler.
                for _ in train_dataloader:
                    break

        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            elif isinstance(train_dataloader.dataset, IterableDatasetShard):
                train_dataloader.dataset.set_epoch(epoch)

            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader

            # Reset the past mems state at the beginning of each epoch if necessary.
            if args.past_index >= 0:
                self._past = None

            steps_in_epoch = (
                len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)

            for step, inputs in enumerate(epoch_iterator):

                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    if steps_trained_progress_bar is not None:
                        steps_trained_progress_bar.update(1)
                    if steps_trained_in_current_epoch == 0:
                        self._load_rng_state(resume_from_checkpoint)
                    continue
                elif steps_trained_progress_bar is not None:
                    steps_trained_progress_bar.close()
                    steps_trained_progress_bar = None

                if step % args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(args, self.state, self.control)

                if (
                    ((step + 1) % args.gradient_accumulation_steps != 0)
                    and args.local_rank != -1
                    and args._no_sync_in_gradient_accumulation
                ):
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                    with model.no_sync():
                        tr_loss += self.training_step(model, inputs)
                else:
                    tr_loss += self.training_step(model, inputs)
                self.current_flos += float(self.floating_point_ops(inputs))

                # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
                if self.deepspeed:
                    self.deepspeed.step()

                if (step + 1) % args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    steps_in_epoch <= args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
                ):
                    # Gradient clipping
                    if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
                        # deepspeed does its own clipping

                        if self.use_amp:
                            # AMP: gradients need unscaling
                            self.scaler.unscale_(self.optimizer)

                        if hasattr(self.optimizer, "clip_grad_norm"):
                            # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                            self.optimizer.clip_grad_norm(args.max_grad_norm)
                        elif hasattr(model, "clip_grad_norm_"):
                            # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                            model.clip_grad_norm_(args.max_grad_norm)
                        else:
                            # Revert to normal clipping otherwise, handling Apex or full precision
                            nn.utils.clip_grad_norm_(
                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                                args.max_grad_norm,
                            )

                    # Optimizer step
                    optimizer_was_run = True
                    if self.deepspeed:
                        pass  # called outside the loop
                    elif is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.use_amp:
                        scale_before = self.scaler.get_scale()
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
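                        # Note: `scaler.update()` shrinks the loss scale when inf/NaN gradients were found, in
                        # which case `scaler.step` skipped the optimizer step; comparing the scale before and
                        # after tells us whether the step actually ran, keeping the LR scheduler in sync.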
                        scale_after = self.scaler.get_scale()
                        optimizer_was_run = scale_before <= scale_after
                    else:
                        self.optimizer.step()

                    if optimizer_was_run and not self.deepspeed:
                        self.lr_scheduler.step()

                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(args, self.state, self.control)

                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
                else:
                    self.control = self.callback_handler.on_substep_end(args, self.state, self.control)

                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break

            self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)

            if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break

        if args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
            if is_torch_tpu_available():
                xm.rendezvous("load_best_model_at_end")
            elif args.local_rank != -1:
                dist.barrier()

            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )

            best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
            if os.path.exists(best_model_path):
                # We load the model state dict on the CPU to avoid an OOM error.
                state_dict = torch.load(best_model_path, map_location="cpu")
                # If the model is on the GPU, it still works!
                self._load_state_dict_in_model(state_dict)
            else:
                logger.warning(
                    f"Could not locate the best model at {best_model_path}. If you are running distributed training "
                    "on multiple nodes, you should activate `--save_on_each_node`."
                )

            if self.deepspeed:
                self.deepspeed.load_checkpoint(
                    self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
                )

        # add remaining tr_loss
        self._total_loss_scalar += tr_loss.item()
        train_loss = self._total_loss_scalar / self.state.global_step

        metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
        self.store_flos()
        metrics["total_flos"] = self.state.total_flos
        metrics["train_loss"] = train_loss

        self.is_in_train = False

        self._memory_tracker.stop_and_update_metrics(metrics)

        self.log(metrics)

        self.control = self.callback_handler.on_train_end(args, self.state, self.control)

        return TrainOutput(self.state.global_step, train_loss, metrics)

    def _load_state_dict_in_model(self, state_dict):
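        # Loaded with `strict=False`: keys deliberately left out of the checkpoint (`_keys_to_ignore_on_save`,
        # typically tied weights) are restored below by re-tying instead of being treated as an error.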
        load_result = self.model.load_state_dict(state_dict, strict=False)

        if len(load_result.missing_keys) != 0:
            if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
                self.model.tie_weights()
            else:
                logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
        if len(load_result.unexpected_keys) != 0:
            logger.warning(
                f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
            )

    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = tr_loss.item()
            # reset tr_loss to zero in place, so the running sum stays a tensor on the right device
            tr_loss -= tr_loss

            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()

            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()

            self.log(logs)

        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, epoch, metrics)

        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def _load_rng_state(self, checkpoint):
        # Load RNG states from `checkpoint`
        if checkpoint is None:
            return

        local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
        if local_rank != -1:
            rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
            # `rng_file` already contains the checkpoint folder, so don't join it with `checkpoint` again.
            if not os.path.isfile(rng_file):
                logger.info(
                    f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
                    "wasn't launched in a distributed fashion, reproducibility is not guaranteed."
                )
                return
        else:
            rng_file = os.path.join(checkpoint, "rng_state.pth")
            if not os.path.isfile(rng_file):
                logger.info(
                    "Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
                    "fashion, reproducibility is not guaranteed."
                )
                return

        checkpoint_rng_state = torch.load(rng_file)
        random.setstate(checkpoint_rng_state["python"])
        np.random.set_state(checkpoint_rng_state["numpy"])
        torch.random.set_rng_state(checkpoint_rng_state["cpu"])
        if torch.cuda.is_available():
            if self.args.local_rank != -1:
                torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
            else:
                torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
        if is_torch_tpu_available():
            xm.set_rng_state(checkpoint_rng_state["xla"])

    def _save_checkpoint(self, model, trial, metrics=None):
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save except FullyShardedDDP.
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"

        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        if self.hp_search_backend is not None and trial is not None:
            if self.hp_search_backend == HPSearchBackend.OPTUNA:
                run_id = trial.number
            else:
                from ray import tune

                run_id = tune.get_trial_id()
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            run_dir = os.path.join(self.args.output_dir, run_name)
        else:
            run_dir = self.args.output_dir
            self.store_flos()

        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir)
        if self.deepspeed:
            # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
            # config `stage3_gather_fp16_weights_on_model_save` is True
            self.deepspeed.save_checkpoint(output_dir)

        # Save optimizer and scheduler
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            self.optimizer.consolidate_state_dict()

        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            if smp.dp_rank() == 0:
                # Consolidate the state dict on all processes of dp_rank 0
                opt_state_dict = self.optimizer.state_dict()
                # Save it and the scheduler on the main process
                if self.args.should_save:
                    torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    reissue_pt_warnings(caught_warnings)
                    if self.use_amp:
                        torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
        elif self.args.should_save and not self.deepspeed:
            # deepspeed.save_checkpoint above saves model/optim/sched
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)
            if self.use_amp:
                torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))

        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir

        # Save the Trainer state
        if self.args.should_save:
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

        # Save RNG state in non-distributed training
        rng_states = {
            "python": random.getstate(),
            "numpy": np.random.get_state(),
            "cpu": torch.random.get_rng_state(),
        }
        if torch.cuda.is_available():
            if self.args.local_rank == -1:
                # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
                rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
            else:
                rng_states["cuda"] = torch.cuda.random.get_rng_state()

        if is_torch_tpu_available():
            rng_states["xla"] = xm.get_rng_state()

        # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
        # not yet exist.
        os.makedirs(output_dir, exist_ok=True)
        local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
        if local_rank == -1:
            torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
        else:
            torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))

        # Maybe delete some older checkpoints.
        if self.args.should_save:
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)

    def _load_optimizer_and_scheduler(self, checkpoint):
        """If optimizer and scheduler states exist, load them."""
        if checkpoint is None:
            return

        if self.deepspeed:
            # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
            return

        if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
            os.path.join(checkpoint, "scheduler.pt")
        ):
            # Load in optimizer and scheduler states
            if is_torch_tpu_available():
                # On TPU we have to take some extra precautions to properly load the states on the right device.
                optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
                with warnings.catch_warnings(record=True) as caught_warnings:
                    lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
                reissue_pt_warnings(caught_warnings)

                xm.send_cpu_data_to_device(optimizer_state, self.args.device)
                xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

                self.optimizer.load_state_dict(optimizer_state)
                self.lr_scheduler.load_state_dict(lr_scheduler_state)
            else:
                map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
                self.optimizer.load_state_dict(
                    torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
                )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
                reissue_pt_warnings(caught_warnings)
                if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")):
                    self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt")))

    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
        compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: str = "minimize",
        backend: Optional[Union["str", HPSearchBackend]] = None,
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
        **kwargs,
    ) -> BestRun:
        """
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
        :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
        provided, the sum of all metrics otherwise.

        .. warning::

            To use this method, you need to have provided a ``model_init`` when initializing your
            :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
            with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
            method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.

        Args:
            hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
                A function that defines the hyperparameter search space. Will default to
                :func:`~transformers.trainer_utils.default_hp_space_optuna` or
                :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
            compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
                A function computing the objective to minimize or maximize from the metrics returned by the
                :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
                The number of trial runs to test.
            direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize greater or lower objective values. Can be :obj:`"minimize"` or :obj:`"maximize"`,
                you should pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when
                optimizing one or several metrics.
            backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
                one is installed. If both are installed, will default to optuna.
            hp_name (:obj:`Callable[["optuna.Trial"], str]`, `optional`):
                A function that defines the trial/run name.
            kwargs:
                Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
                more information see:

                - the documentation of `optuna.create_study
                  <https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
                - the documentation of `tune.run
                  <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__

        Returns:
            :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
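
        Example (a minimal sketch, assuming ``optuna`` is installed and this ``Trainer`` was created with a
        ``model_init``; the search-space keys must be valid :class:`~transformers.TrainingArguments` fields)::

            def my_hp_space(trial):
                return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}

            best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction="minimize")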
        """
        if backend is None:
            backend = default_hp_search_backend()
            if backend is None:
                raise RuntimeError(
                    "At least one of optuna or ray should be installed. "
                    "To install optuna run `pip install optuna`. "
                    "To install ray run `pip install ray[tune]`."
                )
        backend = HPSearchBackend(backend)
        if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
            raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
        if backend == HPSearchBackend.RAY and not is_ray_tune_available():
            raise RuntimeError(
                "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
            )
        self.hp_search_backend = backend
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )

        self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
        self.hp_name = hp_name
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective

        run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
        best_run = run_hp_search(self, n_trials, direction, **kwargs)

        self.hp_search_backend = None
        return best_run

    def log(self, logs: Dict[str, float]) -> None:
        """
        Log :obj:`logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (:obj:`Dict[str, float]`):
                The values to log.
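
        Example (a minimal sketch; the metric name is hypothetical)::

            trainer.log({"my_metric": 0.5})  # also appended to ``trainer.state.log_history``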
        """
        if self.state.epoch is not None:
            logs["epoch"] = round(self.state.epoch, 2)

        output = {**logs, **{"step": self.state.global_step}}
        self.state.log_history.append(output)
        self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)

    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        """
        Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        """
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                kwargs = dict(device=self.args.device)
                if self.deepspeed and inputs[k].dtype != torch.int64:
                    # NLP models inputs are int64 and those get adjusted to the right dtype of the
                    # embedding. Other models such as wav2vec2's inputs are already float and thus
                    # may need special handling to match the dtypes of the model
                    kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))

                inputs[k] = v.to(**kwargs)

        if self.args.past_index >= 0 and self._past is not None:
            inputs["mems"] = self._past

        return inputs

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if is_sagemaker_mp_enabled():
            scaler = self.scaler if self.use_amp else None
            loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
            return loss_mb.reduce_mean().detach().to(self.args.device)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # loss gets scaled under gradient_accumulation_steps in deepspeed
            loss = self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()

    def compute_loss(self, model, inputs, return_outputs=False):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
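
        Example (a minimal sketch of an override; the class name and label weights are hypothetical)::

            import torch
            from transformers import Trainer

            class WeightedLossTrainer(Trainer):
                def compute_loss(self, model, inputs, return_outputs=False):
                    labels = inputs.pop("labels")
                    outputs = model(**inputs)
                    logits = outputs.logits
                    # weight the two classes differently, e.g. for an imbalanced dataset
                    loss_fct = torch.nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0], device=logits.device))
                    loss = loss_fct(logits.view(-1, model.config.num_labels), labels.view(-1))
                    return (loss, outputs) if return_outputs else loss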
        """
        if self.label_smoother is not None and "labels" in inputs:
            labels = inputs.pop("labels")
        else:
            labels = None
        outputs = model(**inputs)
        # Save past state if it exists
        # TODO: this needs to be fixed and made cleaner later.
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]

        if labels is not None:
            loss = self.label_smoother(outputs, labels)
        else:
            # We don't use .loss here since the model may return tuples instead of ModelOutput.
            loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]

        return (loss, outputs) if return_outputs else loss

    def is_local_process_zero(self) -> bool:
        """
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
        machines) main process.
        """
        return self.args.local_process_index == 0

    def is_world_process_zero(self) -> bool:
        """
        Whether or not this process is the global main process (when training in a distributed fashion on several
        machines, this is only going to be :obj:`True` for one process).
        """
        # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
        # process index.
        if is_sagemaker_mp_enabled():
            return smp.rank() == 0
        else:
            return self.args.process_index == 0

    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.

        Will only save from the main process.
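
        Example (a minimal sketch; the output path is hypothetical)::

            trainer.save_model("./my_finetuned_model")
            # reload later with ``from_pretrained("./my_finetuned_model")``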
        """

        if output_dir is None:
            output_dir = self.args.output_dir

        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif is_sagemaker_mp_enabled():
            # Calling the state_dict needs to be done on the wrapped model and on all processes.
            state_dict = self.model_wrapped.state_dict()
            if self.args.should_save:
                self._save(output_dir, state_dict=state_dict)
        elif (
            ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
        ):
            state_dict = self.model.state_dict()

            if self.args.should_save:
                self._save(output_dir, state_dict=state_dict)
        elif self.deepspeed:

            # this takes care of everything as long as we aren't under zero3
            if self.args.should_save:
                self._save(output_dir)

            if is_deepspeed_zero3_enabled():
                # It's too complicated to try to override different places where the weights dump gets
                # saved, so since under zero3 the file is bogus, simply delete it. The user should
                # either use the deepspeed checkpoint to resume or to recover full weights use
                # zero_to_fp32.py stored in the checkpoint.
                if self.args.should_save:
                    file = os.path.join(output_dir, WEIGHTS_NAME)
                    if os.path.isfile(file):
                        # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
                        os.remove(file)

                # now save the real model if stage3_gather_fp16_weights_on_model_save=True
                # if false it will not be saved.
                # This must be called on all ranks
                self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)

        elif self.args.should_save:
            self._save(output_dir)

    def _save_tpu(self, output_dir: Optional[str] = None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info(f"Saving model checkpoint to {output_dir}")

        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                unwrap_model(self.model).save_pretrained(
                    output_dir,
                    save_config=self.args.should_save,
                    state_dict=self.model.state_dict(),
                    save_function=xm.save,
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = self.model.state_dict()
                xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, save_config=self.args.should_save, save_function=xm.save)
        if self.tokenizer is not None and self.args.should_save:
            self.tokenizer.save_pretrained(output_dir)

    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                if state_dict is None:
                    state_dict = self.model.state_dict()
                unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if state_dict is None:
                    state_dict = self.model.state_dict()
                torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, state_dict=state_dict)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

    def store_flos(self):
        # Storing the number of floating-point operations that went into the model
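        # In distributed training, every process counts its own flos; sum them across processes before
        # accumulating into the trainer state so `total_flos` reflects the whole job.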
        if self.args.local_rank != -1:
            self.state.total_flos += distributed_broadcast_scalars([self.current_flos]).sum().item()
            self.current_flos = 0
        else:
            self.state.total_flos += self.current_flos
            self.current_flos = 0

    def _sorted_checkpoints(
        self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
    ) -> List[str]:
        ordering_and_checkpoint_path = []

        glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]

        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match is not None and regex_match.groups() is not None:
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))

        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
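            # Bubble the best checkpoint up toward the newest end of the sorted list so that rotation,
            # which deletes from the oldest end, never removes it.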
            for i in range(best_model_index, len(checkpoints_sorted) - 2):
                checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
Julien Chaumond's avatar
Julien Chaumond committed
1974
1975
        return checkpoints_sorted

1976
    def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
1977
        if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
Julien Chaumond's avatar
Julien Chaumond committed
1978
1979
1980
            return

        # Check if we should delete older checkpoint(s)
1981
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
Julien Chaumond's avatar
Julien Chaumond committed
1982
1983
1984
        if len(checkpoints_sorted) <= self.args.save_total_limit:
            return

1985
        # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
        # we don't do to allow resuming.
        save_total_limit = self.args.save_total_limit
        if (
            self.state.best_model_checkpoint is not None
            and self.args.save_total_limit == 1
            and checkpoints_sorted[-1] != self.state.best_model_checkpoint
        ):
            save_total_limit = 2

        number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
        checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
        for checkpoint in checkpoints_to_be_deleted:
            logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
            shutil.rmtree(checkpoint)
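    # Rotation sketch (hypothetical state): with save_total_limit=1,
    # load_best_model_at_end=True and checkpoints ["checkpoint-500" (best),
    # "checkpoint-1000"], the limit is bumped to 2 above so that both the best
    # and the most recent checkpoint survive, keeping resume-from-last possible.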

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> Dict[str, float]:
        """
        Run evaluation and return the resulting metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init :obj:`compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
                :obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
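
        Example (a minimal usage sketch; assumes a ``trainer`` whose evaluation dataset
        provides labels, so that an ``eval_loss`` key is present)::

            metrics = trainer.evaluate()
            print(metrics["eval_loss"], metrics["eval_runtime"])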
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        start_time = time.time()

        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        self.log(output.metrics)

        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)

        self._memory_tracker.stop_and_update_metrics(output.metrics)

        return output.metrics

    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
    ) -> PredictionOutput:
        """
        Run prediction and return the predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is a :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
                "test_bleu" if the prefix is "test" (default).

        .. note::

            If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.

        Returns: `NamedTuple` A namedtuple with the following keys:

            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
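
        Example (a minimal usage sketch for a classification model; ``test_ds`` is a
        hypothetical dataset with the same features as the training data)::

            output = trainer.predict(test_ds)
            class_ids = output.predictions.argmax(-1)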
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()

        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        self._memory_tracker.stop_and_update_metrics(output.metrics)

        return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)

    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.
        """
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )

        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:

            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None

        model = self._wrap_model(self.model, training=False)

        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)

        batch_size = dataloader.batch_size

        logger.info(f"***** Running {description} *****")
        if isinstance(dataloader.dataset, collections.abc.Sized):
            logger.info(f"  Num examples = {self.num_examples(dataloader)}")
        else:
            logger.info("  Num examples: Unknown")
        logger.info(f"  Batch size = {batch_size}")

        model.eval()

        self.callback_handler.eval_dataloader = dataloader
        # Do this before wrapping.
        eval_dataset = dataloader.dataset

        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)

        if self.args.past_index >= 0:
            self._past = None

        # Initialize containers
        # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
        losses_host = None
        preds_host = None
        labels_host = None
        # losses/preds/labels on CPU (final containers)
        all_losses = None
        all_preds = None
        all_labels = None
        # Will be useful when we have an iterable dataset, since we won't know its length in advance.
        observed_num_examples = 0
        # Main evaluation loop
        for step, inputs in enumerate(dataloader):
            # Update the observed num examples
            observed_batch_size = find_batch_size(inputs)
            if observed_batch_size is not None:
                observed_num_examples += observed_batch_size

            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)

            # Update containers on host
            if loss is not None:
                losses = self._nested_gather(loss.repeat(batch_size))
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                logits = self._pad_across_processes(logits)
                logits = self._nested_gather(logits)
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels = self._pad_across_processes(labels)
                labels = self._nested_gather(labels)
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)

            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                if losses_host is not None:
                    losses = nested_numpify(losses_host)
                    all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
                if preds_host is not None:
                    logits = nested_numpify(preds_host)
                    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
                if labels_host is not None:
                    labels = nested_numpify(labels_host)
                    all_labels = (
                        labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
                    )

                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")

        # Gather all remaining tensors and put them back on the CPU
        if losses_host is not None:
            losses = nested_numpify(losses_host)
            all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
        if preds_host is not None:
            logits = nested_numpify(preds_host)
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
        if labels_host is not None:
            labels = nested_numpify(labels_host)
            all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)

        # Number of samples
        if not isinstance(eval_dataset, IterableDataset):
            num_samples = len(eval_dataset)
        # The instance check is weird and does not actually check for the type, but whether the dataset has the right
        # methods. Therefore we need to make sure it also has the attribute.
        elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
            num_samples = eval_dataset.num_examples
        else:
            num_samples = observed_num_examples

        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samples has been rounded to a multiple of batch_size, so we truncate.
        if all_losses is not None:
            all_losses = all_losses[:num_samples]
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, num_samples)
        if all_labels is not None:
            all_labels = nested_truncate(all_labels, num_samples)

        # Metrics!
        if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        else:
            metrics = {}

        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)

        if all_losses is not None:
            metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)

    def _nested_gather(self, tensors, name=None):
        """
        Gather value of `tensors` (tensor or list/tuple of nested tensors) from all processes.
        """
        if tensors is None:
            return
        if is_torch_tpu_available():
            if name is None:
                name = "nested_gather"
            tensors = nested_xla_mesh_reduce(tensors, name)
        elif is_sagemaker_mp_enabled():
            tensors = smp_gather(tensors)
        elif self.args.local_rank != -1:
            tensors = distributed_concat(tensors)
        return tensors

    # Copied from Accelerate.
    def _pad_across_processes(self, tensor, pad_index=-100):
        """
        Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
        they can safely be gathered.
        """
        if isinstance(tensor, (list, tuple)):
            return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
        elif isinstance(tensor, dict):
            return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
        elif not isinstance(tensor, torch.Tensor):
            raise TypeError(
                f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
            )

        if len(tensor.shape) < 2:
            return tensor
        # Gather all sizes
        size = torch.tensor(tensor.shape, device=tensor.device)[None]
        sizes = self._nested_gather(size).cpu()

        max_size = max(s[1] for s in sizes)
        if tensor.shape[1] == max_size:
            return tensor

        # Then pad to the maximum size
        old_size = tensor.shape
        new_size = list(old_size)
        new_size[1] = max_size
        new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
        new_tensor[:, : old_size[1]] = tensor
        return new_tensor
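    # Shape sketch (hypothetical sizes): if one process holds logits of shape
    # (8, 12) and another of shape (8, 9), the second tensor is padded with -100
    # along dim 1 up to (8, 12) so the subsequent cross-process gather can
    # concatenate along dim 0 without a size mismatch.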

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Returns:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        """
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []

        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None

        with torch.no_grad():
            if is_sagemaker_mp_enabled():
                raw_outputs = smp_forward_only(model, inputs)
                if has_labels:
                    if isinstance(raw_outputs, dict):
                        loss_mb = raw_outputs["loss"]
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        loss_mb = raw_outputs[0]
                        logits_mb = raw_outputs[1:]

                    loss = loss_mb.reduce_mean().detach().cpu()
                    logits = smp_nested_concat(logits_mb)
                else:
                    loss = None
                    if isinstance(raw_outputs, dict):
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                    else:
                        logits_mb = raw_outputs
                    logits = smp_nested_concat(logits_mb)
            else:
                if has_labels:
                    loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                    loss = loss.mean().detach()
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        logits = outputs[1:]
                else:
                    loss = None
                    if self.use_amp:
                        with autocast():
                            outputs = model(**inputs)
                    else:
                        outputs = model(**inputs)
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                    else:
                        logits = outputs
                    # TODO: this needs to be fixed and made cleaner later.
                    if self.args.past_index >= 0:
                        self._past = outputs[self.args.past_index - 1]

        if prediction_loss_only:
            return (loss, None, None)

        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]

        return (loss, logits, labels)
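    # Override sketch (hypothetical subclass): keep the default behavior but
    # always drop hidden states from the gathered outputs:
    #
    #     class MyTrainer(Trainer):
    #         def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
    #             return super().prediction_step(
    #                 model, inputs, prediction_loss_only, ignore_keys=["hidden_states"]
    #             )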

    def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
        """
        For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
        floating point operations for every backward + forward pass. If using another model, either implement such a
        method in the model or subclass and override this method.

        Args:
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            :obj:`int`: The number of floating-point operations.
        """
        if hasattr(self.model, "floating_point_ops"):
            return self.model.floating_point_ops(inputs)
        else:
            return 0
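    # Override sketch (a rough rule of thumb, not the library's method): for a
    # plain nn.Module a subclass could return
    #
    #     6 * sum(p.numel() for p in self.model.parameters()) * inputs["input_ids"].numel()
    #
    # i.e. the common 6 * parameters * tokens estimate for transformer models.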

    def init_git_repo(self):
        """
        Initializes a git repo for :obj:`self.args.push_to_hub_model_id` inside :obj:`self.args.output_dir`.
        """
        if not self.args.should_save:
            return
        use_auth_token = True if self.args.push_to_hub_token is None else self.args.push_to_hub_token
        repo_url = PushToHubMixin._get_repo_url_from_name(
            self.args.push_to_hub_model_id,
            organization=self.args.push_to_hub_organization,
            use_auth_token=use_auth_token,
        )
        self.repo = PushToHubMixin._create_or_get_repo(
            self.args.output_dir, repo_url=repo_url, use_auth_token=use_auth_token
        )

        # By default, ignore the checkpoint folders
        if not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")):
            with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
                writer.writelines(["checkpoint-*/"])

    def create_model_card(
        self,
        language: Optional[str] = None,
        license: Optional[str] = None,
        tags: Optional[str] = None,
        model_name: Optional[str] = None,
        finetuned_from: Optional[str] = None,
        tasks: Optional[str] = None,
        dataset_tags: Optional[Union[str, List[str]]] = None,
        dataset: Optional[Union[str, List[str]]] = None,
        dataset_args: Optional[Union[str, List[str]]] = None,
    ):
        training_summary = TrainingSummary.from_trainer(
            self,
            language=language,
            license=license,
            tags=tags,
            model_name=model_name,
            finetuned_from=finetuned_from,
            tasks=tasks,
            dataset_tags=dataset_tags,
            dataset=dataset,
            dataset_args=dataset_args,
        )
        model_card = training_summary.to_model_card()
        with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
            f.write(model_card)
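    # Usage sketch (hypothetical metadata; writes README.md into args.output_dir):
    #
    #     trainer.create_model_card(
    #         language="en", license="apache-2.0", tasks="text-classification"
    #     )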

    def push_to_hub(self, commit_message: Optional[str] = "add model", **kwargs) -> str:
        """
        Upload `self.model` and `self.tokenizer` to the 🤗 model hub on the repo `self.args.push_to_hub_model_id`.

        Parameters:
            commit_message (:obj:`str`, `optional`, defaults to :obj:`"add model"`):
                Message to commit while pushing.
            kwargs:
                Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.

        Returns:
            The url of the commit of your model in the given repository.
        """

        if self.args.should_save:
            self.create_model_card(model_name=self.args.push_to_hub_model_id, **kwargs)
        # Needs to be executed on all processes for TPU training, but will only save on the process determined by
        # self.args.should_save.
        self.save_model()

        # Only push from one node.
        if not self.is_world_process_zero():
            return

        return self.repo.push_to_hub(commit_message=commit_message)
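    # Usage sketch (assumes TrainingArguments were built with a
    # push_to_hub_model_id and valid hub credentials):
    #
    #     url = trainer.push_to_hub(commit_message="End of training")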

    #
    # Deprecated code
    #

    def prediction_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.
        """
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )

        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:

            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None

        model = self._wrap_model(self.model, training=False)

        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)

        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info(f"***** Running {description} *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Batch size = {batch_size}")
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None

        world_size = max(1, self.args.world_size)

        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        if not prediction_loss_only:
            # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
            # a batch size to the sampler)
            make_multiple_of = None
            if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
                make_multiple_of = dataloader.sampler.batch_size
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)

        model.eval()

        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)

        if self.args.past_index >= 0:
            self._past = None

        self.callback_handler.eval_dataloader = dataloader

        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            if loss is not None:
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)

            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))

                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")

        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))

        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None

        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}

        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)

        if eval_loss is not None:
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)

    def _gather_and_numpify(self, tensors, name):
        """
        Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy arrays.
        """
        if tensors is None:
            return
        if is_torch_tpu_available():
            tensors = nested_xla_mesh_reduce(tensors, name)
        elif is_sagemaker_mp_enabled():
            tensors = smp_gather(tensors)
        elif self.args.local_rank != -1:
            tensors = distributed_concat(tensors)

        return nested_numpify(tensors)