"tests/openai/test_modeling_tf_openai.py" did not exist on "3552d0e0d89711404ec49b0080cd1f48ae224bb3"
run_flax_glue.py 28.2 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 馃 Flax Transformers model for sequence classification on GLUE."""
import json
import logging
import math
import os
import random
import sys
import time
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple

import datasets
import evaluate
import jax
import jax.numpy as jnp
import numpy as np
import optax
from datasets import load_dataset
from flax import struct, traverse_util
from flax.jax_utils import pad_shard_unpad, replicate, unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import HfApi
from tqdm import tqdm

import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    FlaxAutoModelForSequenceClassification,
    HfArgumentParser,
    PretrainedConfig,
    TrainingArguments,
    is_tensorboard_available,
)
from transformers.utils import check_min_version, send_example_telemetry


logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.39.0.dev0")

Array = Any
Dataset = datasets.arrow_dataset.Dataset
PRNGKey = Any


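# Map each GLUE task to the names of its input text column(s); single-sentence tasks use None
# for the second key.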
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_slow_tokenizer: Optional[bool] = field(
        default=False,
        metadata={"help": "If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library)."},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    token: str = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    use_auth_token: bool = field(
        default=None,
        metadata={
            "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead."
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                "execute code present on the Hub on your local machine."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: Optional[str] = field(
        default=None, metadata={"help": f"The name of the glue task to train on. choices {list(task_to_keys.keys())}"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
    )
    text_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
    )
    label_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If set, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.task_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        self.task_name = self.task_name.lower() if isinstance(self.task_name, str) else self.task_name


def create_train_state(
    model: FlaxAutoModelForSequenceClassification,
    learning_rate_fn: Callable[[int], float],
    is_regression: bool,
    num_labels: int,
    weight_decay: float,
) -> train_state.TrainState:
    """Create initial training state."""

    class TrainState(train_state.TrainState):
        """Train state with an Optax optimizer.

        The two functions below differ depending on whether the task is classification
        or regression.

        Args:
          logits_fn: Applied to last layer to obtain the logits.
          loss_fn: Function to compute the loss.
        """

        logits_fn: Callable = struct.field(pytree_node=False)
        loss_fn: Callable = struct.field(pytree_node=False)

    # We use Optax's "masking" functionality to not apply weight decay
    # to bias and LayerNorm scale parameters. decay_mask_fn returns a
    # mask boolean with the same structure as the parameters.
    # The mask is True for parameters that should be decayed.
    def decay_mask_fn(params):
        flat_params = traverse_util.flatten_dict(params)
        # find out all LayerNorm parameters
        layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
        layer_norm_named_params = {
            layer[-2:]
            for layer_norm_name in layer_norm_candidates
            for layer in flat_params.keys()
            if layer_norm_name in "".join(layer).lower()
        }
        flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
        return traverse_util.unflatten_dict(flat_mask)

    tx = optax.adamw(
        learning_rate=learning_rate_fn, b1=0.9, b2=0.999, eps=1e-6, weight_decay=weight_decay, mask=decay_mask_fn
    )

    if is_regression:

        def mse_loss(logits, labels):
            return jnp.mean((logits[..., 0] - labels) ** 2)

        return TrainState.create(
            apply_fn=model.__call__,
            params=model.params,
            tx=tx,
            logits_fn=lambda logits: logits[..., 0],
            loss_fn=mse_loss,
        )
    else:  # Classification.

        def cross_entropy_loss(logits, labels):
            xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels))
            return jnp.mean(xentropy)

        return TrainState.create(
            apply_fn=model.__call__,
            params=model.params,
            tx=tx,
            logits_fn=lambda logits: logits.argmax(-1),
            loss_fn=cross_entropy_loss,
        )


def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.ndarray]:
    """Returns a linear warmup, linear_decay learning rate function."""
    steps_per_epoch = train_ds_size // train_batch_size
    num_train_steps = steps_per_epoch * num_train_epochs
    warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay_fn = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
    )
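    # Chain the two schedules: linear warmup for the first `num_warmup_steps` steps, then linear decay to 0.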
    schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
    return schedule_fn


def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int):
    """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices."""
    steps_per_epoch = len(dataset) // batch_size
    perms = jax.random.permutation(rng, len(dataset))
    perms = perms[: steps_per_epoch * batch_size]  # Skip incomplete batch.
    perms = perms.reshape((steps_per_epoch, batch_size))

    for perm in perms:
        batch = dataset[perm]
        batch = {k: np.array(v) for k, v in batch.items()}
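        # `shard` reshapes each array to (local_device_count, per_device_batch_size, ...) so it can be fed to `pmap`.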
        batch = shard(batch)

        yield batch


def glue_eval_data_collator(dataset: Dataset, batch_size: int):
    """Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop."""
    batch_idx = np.arange(len(dataset))

    steps_per_epoch = math.ceil(len(dataset) / batch_size)
    batch_idx = np.array_split(batch_idx, steps_per_epoch)

    for idx in batch_idx:
        batch = dataset[idx]
        batch = {k: np.array(v) for k, v in batch.items()}

        yield batch


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if model_args.use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.",
            FutureWarning,
        )
        if model_args.token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        model_args.token = model_args.use_auth_token

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_glue", model_args, data_args, framework="flax")

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Setup logging, we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
    if jax.process_index() == 0:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # Handle the repository creation
    if training_args.push_to_hub:
        # Retrieve or infer repo_name
        repo_name = training_args.hub_model_id
        if repo_name is None:
            repo_name = Path(training_args.output_dir).absolute().name
        # Create repo and retrieve repo_id
        api = HfApi()
        repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
    # label if at least two columns are provided.

    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.task_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            "glue",
            data_args.task_name,
            token=model_args.token,
        )
    else:
        # Loading the dataset from local csv or json file.
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = (data_args.train_file if data_args.train_file is not None else data_args.validation_file).split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            token=model_args.token,
        )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.

    # Labels
    if data_args.task_name is not None:
        is_regression = data_args.task_name == "stsb"
        if not is_regression:
            label_list = raw_datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique
            label_list = raw_datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
            num_labels = len(label_list)

    # Load pretrained model and tokenizer
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        use_fast=not model_args.use_slow_tokenizer,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = FlaxAutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    # Preprocessing the datasets
    if data_args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if (
        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
        and data_args.task_name is not None
        and not is_regression
    ):
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if sorted(label_name_to_id.keys()) == sorted(label_list):
            logger.info(
                f"The configuration of the model provided the following label correspondence: {label_name_to_id}. "
                "Using it!"
            )
            label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: ",
                f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
                "\nIgnoring the model labels as a result.",
            )
    elif data_args.task_name is None:
        label_to_id = {v: i for i, v in enumerate(label_list)}

    def preprocess_function(examples):
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
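        # Padding everything to `max_seq_length` keeps batch shapes static, which avoids repeated XLA recompilation.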
        result = tokenizer(*texts, padding="max_length", max_length=data_args.max_seq_length, truncation=True)

        if "label" in examples:
            if label_to_id is not None:
                # Map labels to IDs (not necessary for GLUE tasks)
                result["labels"] = [label_to_id[l] for l in examples["label"]]
            else:
                # In all cases, rename the column to labels because the model will expect that.
                result["labels"] = examples["label"]
        return result

    processed_datasets = raw_datasets.map(
        preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names
    )

    train_dataset = processed_datasets["train"]
    eval_dataset = processed_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]

    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # Define a summary writer
    has_tensorboard = is_tensorboard_available()
    if has_tensorboard and jax.process_index() == 0:
        try:
            from flax.metrics.tensorboard import SummaryWriter

            summary_writer = SummaryWriter(training_args.output_dir)
            summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)})
        except ImportError as ie:
            has_tensorboard = False
            logger.warning(
                f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
            )
    else:
        logger.warning(
            "Unable to display metrics through TensorBoard because the package is not installed: "
            "Please run pip install tensorboard to enable."
        )

    def write_train_metric(summary_writer, train_metrics, train_time, step):
        summary_writer.scalar("train_time", train_time, step)

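        # `get_metrics` collects the per-step metric dicts from the devices and stacks them, so each
        # step's value can be written back at its own global step below.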
        train_metrics = get_metrics(train_metrics)
        for key, vals in train_metrics.items():
            tag = f"train_{key}"
            for i, val in enumerate(vals):
                summary_writer.scalar(tag, val, step - len(vals) + i + 1)

    def write_eval_metric(summary_writer, eval_metrics, step):
        for metric_name, value in eval_metrics.items():
            summary_writer.scalar(f"eval_{metric_name}", value, step)

    num_epochs = int(training_args.num_train_epochs)
    rng = jax.random.PRNGKey(training_args.seed)
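    # One dropout RNG per local device; the pmapped train step returns a fresh RNG for every step.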
    dropout_rngs = jax.random.split(rng, jax.local_device_count())

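    # Effective batch sizes are the per-device sizes scaled by the number of devices
    # (local devices for training, all devices for evaluation).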
    train_batch_size = int(training_args.per_device_train_batch_size) * jax.local_device_count()
    per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
    eval_batch_size = per_device_eval_batch_size * jax.device_count()

    learning_rate_fn = create_learning_rate_fn(
        len(train_dataset),
        train_batch_size,
        training_args.num_train_epochs,
        training_args.warmup_steps,
        training_args.learning_rate,
    )

    state = create_train_state(
        model, learning_rate_fn, is_regression, num_labels=num_labels, weight_decay=training_args.weight_decay
    )

    # define step functions
    def train_step(
        state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey
    ) -> Tuple[train_state.TrainState, float]:
        """Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`."""
        dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
        targets = batch.pop("labels")

        def loss_fn(params):
            logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
            loss = state.loss_fn(logits, targets)
            return loss

        grad_fn = jax.value_and_grad(loss_fn)
        loss, grad = grad_fn(state.params)
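        # Average gradients across all devices so every replica applies the same update.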
        grad = jax.lax.pmean(grad, "batch")
        new_state = state.apply_gradients(grads=grad)
        metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch")
        return new_state, metrics, new_dropout_rng

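    # `axis_name="batch"` matches the `pmean` calls above; `donate_argnums=(0,)` lets XLA reuse the
    # old state's device buffers when building the new state.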
    p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,))

    def eval_step(state, batch):
        logits = state.apply_fn(**batch, params=state.params, train=False)[0]
        return state.logits_fn(logits)

    p_eval_step = jax.pmap(eval_step, axis_name="batch")

    if data_args.task_name is not None:
        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
    else:
        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

    logger.info(f"===== Starting training ({num_epochs} epochs) =====")
    train_time = 0

    # make sure weights are replicated on each device
    state = replicate(state)

    steps_per_epoch = len(train_dataset) // train_batch_size
    total_steps = steps_per_epoch * num_epochs
    epochs = tqdm(range(num_epochs), desc=f"Epoch ... (0/{num_epochs})", position=0)
    for epoch in epochs:
        train_start = time.time()
        train_metrics = []

        # Create sampling rng
        rng, input_rng = jax.random.split(rng)

        # train
        train_loader = glue_train_data_collator(input_rng, train_dataset, train_batch_size)
        for step, batch in enumerate(
            tqdm(
                train_loader,
                total=steps_per_epoch,
                desc="Training...",
                position=1,
            ),
        ):
            state, train_metric, dropout_rngs = p_train_step(state, batch, dropout_rngs)
            train_metrics.append(train_metric)

            cur_step = (epoch * steps_per_epoch) + (step + 1)

            if cur_step % training_args.logging_steps == 0 and cur_step > 0:
                # Save metrics
                train_metric = unreplicate(train_metric)
                train_time += time.time() - train_start
                if has_tensorboard and jax.process_index() == 0:
                    write_train_metric(summary_writer, train_metrics, train_time, cur_step)

                epochs.write(
                    f"Step... ({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate:"
                    f" {train_metric['learning_rate']})"
                )

                train_metrics = []

            if (cur_step % training_args.eval_steps == 0 or cur_step % steps_per_epoch == 0) and cur_step > 0:
                # evaluate
                eval_loader = glue_eval_data_collator(eval_dataset, eval_batch_size)
                for batch in tqdm(
                    eval_loader,
                    total=math.ceil(len(eval_dataset) / eval_batch_size),
                    desc="Evaluating ...",
                    position=2,
                ):
                    labels = batch.pop("labels")
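                    # `pad_shard_unpad` pads the final (possibly incomplete) batch so it can be sharded
                    # across devices, runs the pmapped eval step, and strips the padding from the predictions.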
                    predictions = pad_shard_unpad(p_eval_step)(
                        state, batch, min_device_batch=per_device_eval_batch_size
                    )
                    metric.add_batch(predictions=np.array(predictions), references=labels)

                eval_metric = metric.compute()

                logger.info(f"Step... ({cur_step}/{total_steps} | Eval metrics: {eval_metric})")

                if has_tensorboard and jax.process_index() == 0:
                    write_eval_metric(summary_writer, eval_metric, cur_step)

            if (cur_step % training_args.save_steps == 0 and cur_step > 0) or (cur_step == total_steps):
                # save checkpoint after each epoch and push checkpoint to the hub
                if jax.process_index() == 0:
                    params = jax.device_get(unreplicate(state.params))
                    model.save_pretrained(training_args.output_dir, params=params)
                    tokenizer.save_pretrained(training_args.output_dir)
                    if training_args.push_to_hub:
                        api.upload_folder(
                            commit_message=f"Saving weights and logs of epoch {epoch}",
                            folder_path=training_args.output_dir,
                            repo_id=repo_id,
                            repo_type="model",
                            token=training_args.hub_token,
                        )
            epochs.desc = f"Epoch ... {epoch + 1}/{num_epochs}"

    # save the eval metrics in json
    if jax.process_index() == 0:
        eval_metric = {f"eval_{metric_name}": value for metric_name, value in eval_metric.items()}
        path = os.path.join(training_args.output_dir, "eval_results.json")
        with open(path, "w") as f:
            json.dump(eval_metric, f, indent=4, sort_keys=True)


if __name__ == "__main__":
    main()