#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM).
    Adapted from `examples/text-classification/run_glue.py`"""
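
# A typical invocation, shown for illustration only (the model name, languages and
# output directory below are example values, not requirements):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train \
#     --do_eval \
#     --output_dir /tmp/debug_xnli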

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
from datasets import load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments that can be specified on
    the command line.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
    server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether the tokenizer should lowercase the input (passed to AutoTokenizer.from_pretrained())."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
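    # `get_last_checkpoint` scans `output_dir` for `checkpoint-<step>` folders written by a
    # previous `Trainer` run and returns the most recent one, or None if there is none.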
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup distant debugging if needed
    if data_args.server_ip and data_args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )

    # Set the verbosity of the Transformers logger to info (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if model_args.train_language is None:
        train_dataset = load_dataset("xnli", model_args.language, split="train")
    else:
        train_dataset = load_dataset("xnli", model_args.train_language, split="train")

    eval_dataset = load_dataset("xnli", model_args.language, split="validation")
    # Labels
    label_list = train_dataset.features["label"].names
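    # For XNLI these are ["entailment", "neutral", "contradiction"].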
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Preprocessing the datasets
    # Padding strategy
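    # (Padding to max_seq_length up front lets the tokenized dataset be cached with fixed-size
    # rows; dynamic padding pads each batch only up to its own longest sequence, which is often
    # faster end to end.)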
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
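
    # `preprocess_function` returns a dict of columns, e.g. (values purely illustrative):
    #   {"input_ids": [[101, ...], ...], "attention_mask": [[1, 1, ...], ...]}
    # `Dataset.map(batched=True)` below adds these columns to the dataset alongside
    # "premise", "hypothesis" and "label".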

    train_dataset = train_dataset.map(
        preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache
    )
    eval_dataset = eval_dataset.map(
        preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache
    )

    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # Get the metric function
    metric = load_metric("xnli")
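    # The "xnli" metric is plain accuracy; `metric.compute` returns a dict like
    # {"accuracy": 0.7123} (the value shown is purely illustrative).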

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping strings to floats.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
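    # `pad_to_multiple_of=8` rounds padded lengths up to a multiple of 8 so that fp16 matmuls
    # map cleanly onto NVIDIA tensor cores.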

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            model_path = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            model_path = model_args.model_name_or_path
        else:
            model_path = None
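        # `model_path` tells `trainer.train` which directory to restore optimizer/scheduler
        # state from when resuming; with None, training starts from the weights loaded above.
        # (Note: this keyword belongs to the older Trainer API; newer releases use
        # `resume_from_checkpoint`.)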
        train_result = trainer.train(model_path=model_path)
        metrics = train_result.metrics

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    eval_results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_result = trainer.evaluate(eval_dataset=eval_dataset)
        trainer.log_metrics("eval", eval_result)
        trainer.save_metrics("eval", eval_result)
        eval_results.update(eval_result)

    return eval_results


if __name__ == "__main__":
    main()