#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multilingual models on XNLI (e.g. BERT, DistilBERT, XLM).
    Adapted from `examples/text-classification/run_glue.py`"""
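
# A minimal example invocation (a sketch; the model name, hyperparameters, and output path below are
# illustrative choices, not prescribed by this script):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train \
#     --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli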

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
from datasets import load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.21.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input to our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments so they can be specified on
    the command line.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co."},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether the tokenizer should lower case the input (passed to AutoTokenizer.from_pretrained())."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `transformers-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
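        # (For reference, the tokenizer returns a dict of lists per batch, roughly of the form
        # {"input_ids": [...], "attention_mask": [...]}, plus "token_type_ids" for models that use them;
        # the exact keys depend on the tokenizer.)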
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = load_metric("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping strings to floats.
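    # For XNLI the metric reports accuracy, e.g. {"accuracy": 0.71} (illustrative value).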
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
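    # Under fp16 we ask the collator to pad to a multiple of 8 so sequence lengths line up with
    # tensor-core-friendly shapes; this is a throughput tweak, not required for correctness.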
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
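        # The file is a simple tab-separated table, e.g. (illustrative rows):
        #   index   prediction
        #   0       entailment
        #   1       contradiction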
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()