# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, DistilBERT)."""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange

from transformers import (WEIGHTS_NAME, BertConfig,
                          BertForSequenceClassification, BertTokenizer,
                          RobertaConfig,
                          RobertaForSequenceClassification,
                          RobertaTokenizer,
                          XLMConfig, XLMForSequenceClassification,
                          XLMTokenizer, XLNetConfig,
                          XLNetForSequenceClassification,
                          XLNetTokenizer,
                          DistilBertConfig,
                          DistilBertForSequenceClassification,
                          DistilBertTokenizer)
from transformers import AdamW, WarmupLinearSchedule
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features

logger = logging.getLogger(__name__)

ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig)), ())

MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
}


def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
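    # Note: with DataParallel the DataLoader batch is per_gpu_train_batch_size scaled by the
    # number of GPUs visible to this process, since each batch is split across those GPUs.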
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
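    # Biases and LayerNorm weights are excluded from weight decay below, the usual convention
    # when fine-tuning BERT-style models; all other parameters get args.weight_decay.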
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
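    # WarmupLinearSchedule raises the learning rate linearly from 0 to args.learning_rate over
    # args.warmup_steps updates, then decays it linearly back towards 0 until t_total updates.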
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'input_ids':      batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM, DistilBERT and RoBERTa don't use segment_ids
                      'labels':         batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
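                # Gradients are accumulated over gradient_accumulation_steps batches before the
                # optimizer step below, so the loss is scaled down here to keep the accumulated
                # gradient comparable to that of a single larger batch.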

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step


def evaluate(args, model, tokenizer, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
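    # MNLI ships matched and mismatched dev sets, so it is evaluated twice; results for the
    # mismatched set are written to a separate "<output_dir>-MM" directory.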

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs = {'input_ids':      batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM, DistilBERT and RoBERTa don't use segment_ids
                          'labels':         batch[3]}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]

                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
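        # Classification tasks take the argmax over the logits; the regression task (STS-B)
        # uses the raw predicted value directly.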
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)

        output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    return results


def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
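    # The cache file name encodes split, model name, max sequence length and task,
    # e.g. something like "cached_train_bert-base-uncased_128_mrpc" (illustrative).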
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1] 
        examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
        features = convert_examples_to_features(examples,
                                                tokenizer,
                                                label_list=label_list,
                                                max_length=args.max_seq_length,
                                                output_mode=output_mode,
                                                pad_on_left=bool(args.model_type in ['xlnet']),                 # pad on the left for xlnet
                                                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                                pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
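    # The tensor order here (input_ids, attention_mask, token_type_ids, labels) must match the
    # batch[0..3] indexing used to build the inputs dict in train() and evaluate().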
    return dataset


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
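        # Typically launched via torch.distributed.launch, which supplies --local_rank per process;
        # each process then drives a single GPU (n_gpu is forced to 1 below).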
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)


    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    return results


if __name__ == "__main__":
    main()
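
# Example invocation (illustrative values only; adjust paths, task and hyper-parameters):
#   python run_glue.py \
#     --model_type bert \
#     --model_name_or_path bert-base-uncased \
#     --task_name MRPC \
#     --do_train --do_eval --do_lower_case \
#     --data_dir /path/to/glue/MRPC \
#     --max_seq_length 128 \
#     --per_gpu_train_batch_size 32 \
#     --learning_rate 2e-5 \
#     --num_train_epochs 3.0 \
#     --output_dir /tmp/mrpc_output/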