"vscode:/vscode.git/clone" did not exist on "6336017c154c73aa749e411a64557628596e5626"
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional

from .file_utils import (
    cached_property,
    is_sagemaker_distributed_available,
    is_torch_available,
    is_torch_tpu_available,
    torch_required,
)
from .trainer_utils import EvaluationStrategy, SchedulerType
from .utils import logging


if is_torch_available():
    import torch

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


def default_logdir() -> str:
    """
    Same default as PyTorch
    """
    import socket
    from datetime import datetime

    current_time = datetime.now().strftime("%b%d_%H-%M-%S")
    return os.path.join("runs", current_time + "_" + socket.gethostname())


@dataclass
class TrainingArguments:
    """
    TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
    itself**.

    Using :class:`~transformers.HfArgumentParser` we can turn this class into `argparse
    <https://docs.python.org/3/library/argparse.html#module-argparse>`__ arguments that can be specified on the command
    line.
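
    A minimal sketch of that usage (the script name and flag values below are only illustrative)::

        from transformers import HfArgumentParser, TrainingArguments

        parser = HfArgumentParser(TrainingArguments)
        # e.g. python train.py --output_dir out --per_device_train_batch_size 16
        training_args = parser.parse_args_into_dataclasses()[0]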

    Parameters:
        output_dir (:obj:`str`):
            The output directory where the model predictions and checkpoints will be written.
        overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
            :obj:`output_dir` points to a checkpoint directory.
        do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's
            intended to be used by your training/evaluation scripts instead. See the `example scripts
            <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
        do_eval (:obj:`bool`, `optional`):
            Whether to run evaluation on the validation set or not. Will be set to :obj:`True` if
            :obj:`evaluation_strategy` is different from :obj:`"no"`. This argument is not directly used by
            :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
            the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
            details.
        do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to run predictions on the test set or not. This argument is not directly used by
            :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
            the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
            details.
        evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
            The evaluation strategy to adopt during training. Possible values are:

                * :obj:`"no"`: No evaluation is done during training.
                * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
                * :obj:`"epoch"`: Evaluation is done at the end of each epoch.

        prediction_loss_only (:obj:`bool`, `optional`, defaults to :obj:`False`):
            When performing evaluation and generating predictions, only returns the loss.
        per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
            The batch size per GPU/TPU core/CPU for training.
        per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
            The batch size per GPU/TPU core/CPU for evaluation.
        gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):
            Number of update steps to accumulate the gradients for, before performing a backward/update pass.

            .. warning::

                When using gradient accumulation, one step is counted as one step with a backward pass. Therefore,
                logging, evaluation and saving will be conducted every ``gradient_accumulation_steps * xxx_step``
                training examples (see the example at the end of this docstring).
        eval_accumulation_steps (:obj:`int`, `optional`):
            Number of prediction steps to accumulate the output tensors for, before moving the results to the CPU. If
            left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but
            requires more memory).
        learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
            The initial learning rate for :class:`~transformers.AdamW` optimizer.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in
            :class:`~transformers.AdamW` optimizer.
        adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):
            The beta1 hyperparameter for the :class:`~transformers.AdamW` optimizer.
        adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):
            The beta2 hyperparameter for the :class:`~transformers.AdamW` optimizer.
        adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
            The epsilon hyperparameter for the :class:`~transformers.AdamW` optimizer.
        max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
            Maximum gradient norm (for gradient clipping).
        num_train_epochs (:obj:`float`, `optional`, defaults to 3.0):
            Total number of training epochs to perform (if not an integer, the decimal part is the fraction of the
            last epoch to perform before stopping training).
        max_steps (:obj:`int`, `optional`, defaults to -1):
            If set to a positive number, the total number of training steps to perform. Overrides
            :obj:`num_train_epochs`.
        lr_scheduler_type (:obj:`str` or :class:`~transformers.SchedulerType`, `optional`, defaults to :obj:`"linear"`):
            The scheduler type to use. See the documentation of :class:`~transformers.SchedulerType` for all possible
            values.
        warmup_steps (:obj:`int`, `optional`, defaults to 0):
            Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
        logging_dir (:obj:`str`, `optional`):
            `TensorBoard <https://www.tensorflow.org/tensorboard>`__ log directory. Will default to
            `runs/**CURRENT_DATETIME_HOSTNAME**`.
        logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to log and evaluate the first :obj:`global_step` or not.
        logging_steps (:obj:`int`, `optional`, defaults to 500):
            Number of update steps between two logs.
        save_steps (:obj:`int`, `optional`, defaults to 500):
            Number of update steps between two checkpoint saves.
        save_total_limit (:obj:`int`, `optional`):
            If a value is passed, will limit the total number of checkpoints. Deletes the older checkpoints in
            :obj:`output_dir`.
        no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to avoid using CUDA even when it is available.
        seed (:obj:`int`, `optional`, defaults to 42):
            Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the
            :func:`~transformers.Trainer.model_init` function to instantiate the model if it has some randomly
            initialized parameters.
        fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
        fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
            For :obj:`fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
            on the `Apex documentation <https://nvidia.github.io/apex/amp.html>`__.
        fp16_backend (:obj:`str`, `optional`, defaults to :obj:`"auto"`):
            The backend to use for mixed precision training. Must be one of :obj:`"auto"`, :obj:`"amp"` or
            :obj:`"apex"`. :obj:`"auto"` will use AMP or APEX depending on the PyTorch version detected, while the
            other choices will force the requested backend.
        local_rank (:obj:`int`, `optional`, defaults to -1):
            Rank of the process during distributed training.
        tpu_num_cores (:obj:`int`, `optional`):
            When training on TPU, the number of TPU cores (automatically passed by launcher script).
        debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
            When training on TPU, whether to print debug metrics or not.
        dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
            or not.
        eval_steps (:obj:`int`, `optional`):
            Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the
            same value as :obj:`logging_steps` if not set.
        dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):
            Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the
            main process.
        past_index (:obj:`int`, `optional`, defaults to -1):
            Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
            make use of the past hidden states for their predictions. If this argument is set to a positive int, the
            ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
            at the next training step under the keyword argument ``mems``.
        run_name (:obj:`str`, `optional`):
            A descriptor for the run. Typically used for `wandb <https://www.wandb.com/>`_ logging.
        disable_tqdm (:obj:`bool`, `optional`):
            Whether or not to disable the tqdm progress bars and table of metrics produced by
            :class:`~transformers.notebook.NotebookTrainingTracker` in Jupyter Notebooks. Will default to :obj:`True`
            if the logging level is set to warn or lower (default), :obj:`False` otherwise.
        remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):
            If using :obj:`datasets.Dataset` datasets, whether or not to automatically remove the columns unused by the
            model forward method.

            (Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)
        label_names (:obj:`List[str]`, `optional`):
            The list of keys in your dictionary of inputs that correspond to the labels.

            Will eventually default to :obj:`["labels"]` except if the model used is one of the
            :obj:`XxxForQuestionAnswering` in which case it will default to :obj:`["start_positions",
            "end_positions"]`.
        load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to load the best model found during training at the end of training.

            .. note::

                When set to :obj:`True`, the parameter :obj:`save_steps` will be ignored and the model will be saved
                after each evaluation.
        metric_for_best_model (:obj:`str`, `optional`):
            Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different
            models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`.
            Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation
            loss).

            If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to
            :obj:`False` if your metric is better when lower.
        greater_is_better (:obj:`bool`, `optional`):
            Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better
            models should have a greater metric or not. Will default to:

            - :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or
              :obj:`"eval_loss"`.
            - :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`.
        ignore_data_skip (:obj:`bool`, `optional`, defaults to :obj:`False`):
            When resuming training, whether or not to skip the epochs and batches to get the data loading at the same
            stage as in the previous training. If set to :obj:`True`, the training will begin faster (as that skipping
            step can take a long time) but will not yield the same results as the interrupted training would have.
        sharded_ddp (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Use Sharded DDP training from `FairScale <https://github.com/facebookresearch/fairscale>`__ (in distributed
            training only). This is an experimental feature.
        deepspeed (:obj:`str`, `optional`):
            Use `Deepspeed <https://github.com/microsoft/deepspeed>`__. This is an experimental feature and its API may
            evolve in the future. The value is the location of its json config file (usually ``ds_config.json``).
        label_smoothing_factor (:obj:`float`, `optional`, defaults to 0.0):
            The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded
            labels are changed from 0s and 1s to :obj:`label_smoothing_factor/num_labels` and :obj:`1 -
            label_smoothing_factor + label_smoothing_factor/num_labels` respectively.
        adafactor (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to use the :class:`~transformers.Adafactor` optimizer instead of
            :class:`~transformers.AdamW`.
        group_by_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to group together samples of roughly the same length in the training dataset (to minimize
            padding applied and be more efficient). Only useful if applying dynamic padding.
        report_to (:obj:`str` or :obj:`List[str]`, `optional`, defaults to :obj:`"all"`):
            The list of integrations to report the results and logs to. Supported platforms are :obj:`"azure_ml"`,
            :obj:`"comet_ml"`, :obj:`"mlflow"`, :obj:`"tensorboard"` and :obj:`"wandb"`. Use :obj:`"all"` to report to
            all integrations installed, :obj:`"none"` for no integrations.
        ddp_find_unused_parameters (:obj:`bool`, `optional`):
            When using distributed training, the value of the flag :obj:`find_unused_parameters` passed to
            :obj:`DistributedDataParallel`. Will default to :obj:`False` if gradient checkpointing is used, :obj:`True`
            otherwise.
        dataloader_pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether you want to pin memory in data loaders or not. Will default to :obj:`True`.
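
    Example of how the batch size arguments interact (a minimal sketch; the numbers and the ``output_dir`` value
    below are only illustrative)::

        from transformers import TrainingArguments

        args = TrainingArguments(
            output_dir="out",
            per_device_train_batch_size=8,
            gradient_accumulation_steps=4,
        )
        # On one machine with 2 GPUs and no distributed launcher, ``args.train_batch_size`` is
        # 8 * 2 = 16 samples per forward/backward pass, and one optimizer update happens every
        # gradient_accumulation_steps * train_batch_size = 4 * 16 = 64 samples.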
    """

    output_dir: Optional[str] = field(
        default=None,
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={
            "help": (
                "Overwrite the content of the output directory."
                "Use this to continue training if output_dir points to a checkpoint directory."
            )
        },
    )

    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
    do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."})
    do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
    evaluation_strategy: EvaluationStrategy = field(
        default="no",
        metadata={"help": "The evaluation strategy to use."},
    )
    prediction_loss_only: bool = field(
        default=False,
        metadata={"help": "When performing evaluation and predictions, only returns the loss."},
    )

    per_device_train_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
    )
    per_device_eval_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
    )

    per_gpu_train_batch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
            "Batch size per GPU/TPU core/CPU for training."
        },
    )
    per_gpu_eval_batch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
            "Batch size per GPU/TPU core/CPU for evaluation."
        },
    )

    gradient_accumulation_steps: int = field(
        default=1,
        metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
    )
    eval_accumulation_steps: Optional[int] = field(
        default=None,
        metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."},
    )

    learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
    weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
    adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
    adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
    adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
    max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})

    num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
    max_steps: int = field(
        default=-1,
        metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
    )
    lr_scheduler_type: SchedulerType = field(
        default="linear",
        metadata={"help": "The scheduler type to use."},
    )
    warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})

    logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
    logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
    logging_steps: int = field(default=500, metadata={"help": "Log every X update steps."})
    save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X update steps."})
    save_total_limit: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Limit the total amount of checkpoints."
                "Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
            )
        },
    )
    no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
    seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})

    fp16: bool = field(
        default=False,
        metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA Apex) instead of 32-bit"},
    )
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    fp16_backend: str = field(
        default="auto",
        metadata={"help": "The backend to be used for mixed precision.", "choices": ["auto", "amp", "apex"]},
    )
    local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})

    tpu_num_cores: Optional[int] = field(
        default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
    )
    tpu_metrics_debug: bool = field(
        default=False,
        metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
    )
    debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})

    dataloader_drop_last: bool = field(
        default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
    )
    eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
    dataloader_num_workers: int = field(
        default=0,
        metadata={
            "help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
        },
    )

    past_index: int = field(
        default=-1,
        metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
    )

    run_name: Optional[str] = field(
        default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
    )
    disable_tqdm: Optional[bool] = field(
        default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
    )

    remove_unused_columns: Optional[bool] = field(
        default=True, metadata={"help": "Remove columns not required by the model when using a datasets.Dataset."}
    )
    label_names: Optional[List[str]] = field(
        default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
    )

    load_best_model_at_end: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether or not to load the best model found during training at the end of training."},
    )
    metric_for_best_model: Optional[str] = field(
        default=None, metadata={"help": "The metric to use to compare two different models."}
    )
    greater_is_better: Optional[bool] = field(
        default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
    )
    ignore_data_skip: bool = field(
        default=False,
        metadata={
            "help": "When resuming training, whether or not to skip the first epochs and batches to get to the same training data."
        },
    )
    sharded_ddp: bool = field(
        default=False,
        metadata={"help": "Whether or not to use sharded DDP training (in distributed training only)."},
    )
    deepspeed: Optional[str] = field(
        default=None,
        metadata={"help": "Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json)"},
    )
    label_smoothing_factor: float = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
    group_by_length: bool = field(
        default=False,
        metadata={"help": "Whether or not to group samples of roughly the same length together when batching."},
    )
    report_to: Optional[List[str]] = field(
        default=None, metadata={"help": "The list of integrations to report the results and logs to."}
    )
    ddp_find_unused_parameters: Optional[bool] = field(
        default=None,
        metadata={
            "help": "When using distributed training, the value of the flag `find_unused_parameters` passed to "
            "`DistributedDataParallel`."
        },
    )
    dataloader_pin_memory: bool = field(
        default=True, metadata={"help": "Whether or not to pin memory for DataLoader."}
    )
    _n_gpu: int = field(init=False, repr=False, default=-1)

    def __post_init__(self):
        if self.output_dir is None and os.getenv("SM_OUTPUT_DATA_DIR") is None:
            raise ValueError(
                "`output_dir` is only optional if it can get inferred from the environment. Please set a value for "
                "`output_dir`."
            )
        elif os.getenv("SM_OUTPUT_DATA_DIR") is not None:
            if self.output_dir is not None:
                logger.warning(
                    "`output_dir` is overwritten by the env variable 'SM_OUTPUT_DATA_DIR' "
                    f"({os.getenv('SM_OUTPUT_DATA_DIR')})."
                )
            self.output_dir = os.getenv("SM_OUTPUT_DATA_DIR")
        if self.disable_tqdm is None:
            self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
        self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
        self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type)
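        # Requesting an evaluation strategy other than "no" implies evaluation, even if `--do_eval` was not passed.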
        if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:
            self.do_eval = True
        if self.eval_steps is None:
            self.eval_steps = self.logging_steps

        if self.load_best_model_at_end and self.metric_for_best_model is None:
            self.metric_for_best_model = "loss"
        if self.greater_is_better is None and self.metric_for_best_model is not None:
            self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
        if self.run_name is None:
            self.run_name = self.output_dir

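        # Mixed precision (AMP or Apex) only runs on CUDA devices, so reject `--fp16` early on other devices.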
        if is_torch_available() and self.device.type != "cuda" and self.fp16:
            raise ValueError("Mixed precision training with AMP or APEX (`--fp16`) can only be used on CUDA devices.")
        if self.report_to is None:
            logger.info(
                "The default value for the training argument `--report_to` will change in v5 (from all installed "
                "integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as "
                "now. You should start updating your code and make this info disappear :-)."
            )
            self.report_to = "all"
        if self.report_to == "all" or self.report_to == ["all"]:
            # Import at runtime to avoid a circular import.
            from .integrations import get_available_reporting_integrations

            self.report_to = get_available_reporting_integrations()
        elif self.report_to == "none" or self.report_to == ["none"]:
            self.report_to = []
        elif not isinstance(self.report_to, list):
            self.report_to = [self.report_to]

    def __repr__(self):
        # We override the default repr to remove deprecated arguments from the repr. This method should be removed once
        # those deprecated arguments are removed from TrainingArguments. (TODO: v5)
        self_as_dict = asdict(self)
        del self_as_dict["per_gpu_train_batch_size"]
        del self_as_dict["per_gpu_eval_batch_size"]
        attrs_as_str = [f"{k}={v}" for k, v in self_as_dict.items()]
        return f"{self.__class__.__name__}({', '.join(attrs_as_str)})"

    @property
    def train_batch_size(self) -> int:
        """
        The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
        """
        if self.per_gpu_train_batch_size:
            logger.warning(
                "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
                "version. Using `--per_device_train_batch_size` is preferred."
            )
        per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
        train_batch_size = per_device_batch_size * max(1, self.n_gpu)
        return train_batch_size

    @property
    def eval_batch_size(self) -> int:
        """
        The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
        """
        if self.per_gpu_eval_batch_size:
            logger.warning(
                "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
                "version. Using `--per_device_eval_batch_size` is preferred."
            )
        per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
        eval_batch_size = per_device_batch_size * max(1, self.n_gpu)
        return eval_batch_size

    @cached_property
    @torch_required
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            self._n_gpu = 0
        elif is_sagemaker_distributed_available():
            import smdistributed.dataparallel.torch.distributed as dist

            dist.init_process_group()
            self.local_rank = dist.get_local_rank()
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.deepspeed:
            # deepspeed performs its own DDP internally, and requires the program to be started with:
            # deepspeed  ./program.py
            # rather than:
            # python -m torch.distributed.launch --nproc_per_node=2 ./program.py
            from .integrations import is_deepspeed_available

            if not is_deepspeed_available():
                raise ImportError("--deepspeed requires deepspeed: `pip install deepspeed`.")
            import deepspeed

            deepspeed.init_distributed()

            # workaround for setups like notebooks where the launcher can't be used,
            # but deepspeed requires a dist env.
            # env LOCAL_RANK could be set manually by the user, or via init_distributed if mpi4py is installed
            self.local_rank = int(os.environ.get("LOCAL_RANK", "-1"))

            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            torch.distributed.init_process_group(backend="nccl")
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    @torch_required
    def device(self) -> "torch.device":
        """
        The device used by this process.
        """
        return self._setup_devices

    @property
    @torch_required
    def n_gpu(self):
        """
        The number of GPUs used by this process.

        Note:
            This will only be greater than one when you have multiple GPUs available but are not using distributed
            training. For distributed training, it will always be 1.
        """
        # Make sure `self._n_gpu` is properly setup.
        _ = self._setup_devices
        return self._n_gpu

    @property
    @torch_required
    def parallel_mode(self):
        """
        The current mode used for parallelism if multiple GPUs/TPU cores are available. One of:

        - :obj:`ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).
        - :obj:`ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses :obj:`torch.nn.DataParallel`).
        - :obj:`ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses
          :obj:`torch.nn.DistributedDataParallel`).
        - :obj:`ParallelMode.TPU`: several TPU cores.
        """
        if is_torch_tpu_available():
            return ParallelMode.TPU
        elif is_sagemaker_distributed_available():
            return ParallelMode.SAGEMAKER_DISTRIBUTED
        elif self.local_rank != -1:
            return ParallelMode.DISTRIBUTED
        elif self.n_gpu > 1:
            return ParallelMode.NOT_DISTRIBUTED
        else:
            return ParallelMode.NOT_PARALLEL

    @property
    def place_model_on_device(self):
        """
        Can be subclassed and overridden for some specific integrations.
        """
        return True

    def to_dict(self):
        """
        Serializes this instance while replacing `Enum` by their values (for JSON serialization support).
        """
        d = asdict(self)
        for k, v in d.items():
            if isinstance(v, Enum):
                d[k] = v.value
        return d

    def to_json_string(self):
        """
        Serializes this instance to a JSON string.
        """
        return json.dumps(self.to_dict(), indent=2)

    def to_sanitized_dict(self) -> Dict[str, Any]:
        """
        Sanitized serialization to use with TensorBoard's hparams
        """
        d = self.to_dict()
        d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}

        valid_types = [bool, int, float, str]
        if is_torch_available():
            valid_types.append(torch.Tensor)

        return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}


class ParallelMode(Enum):
    NOT_PARALLEL = "not_parallel"
    NOT_DISTRIBUTED = "not_distributed"
    DISTRIBUTED = "distributed"
    SAGEMAKER_DISTRIBUTED = "sm_distributed"
    TPU = "tpu"