# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange

from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
                                  BertForSequenceClassification, BertTokenizer,
                                  RobertaConfig,
                                  RobertaForSequenceClassification,
                                  RobertaTokenizer,
                                  XLMConfig, XLMForSequenceClassification,
                                  XLMTokenizer, XLNetConfig,
                                  XLNetForSequenceClassification,
                                  XLNetTokenizer)

from pytorch_transformers import AdamW, WarmupLinearSchedule

from utils_glue import (compute_metrics, convert_examples_to_features,
                        output_modes, processors)

logger = logging.getLogger(__name__)

ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig)), ())

MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
}


def set_seed(args):
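    """Seed python, numpy and torch (and every CUDA device) so runs are reproducible."""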
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
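
    # t_total counts optimizer updates, not batches: with gradient accumulation,
    # one update is performed every `gradient_accumulation_steps` batches.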
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
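    # Parameters whose names match an entry of `no_decay` (biases and LayerNorm
    # weights) are excluded from weight decay.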
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
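        # amp.initialize returns the patched (model, optimizer) pair; `--fp16_opt_level`
        # selects the AMP optimization level (see https://nvidia.github.io/apex/amp.html).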
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
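            # Batch layout follows the TensorDataset built in load_and_cache_examples:
            # (input_ids, attention_mask, token_type_ids, labels).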
            inputs = {'input_ids':      batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM and RoBERTa don't use segment_ids
                      'labels':         batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuples in pytorch-transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()  # Update learning rate schedule (after the optimizer step)
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step


def evaluate(args, model, tokenizer, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
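        # Logits and gold labels are accumulated over the whole eval set;
        # metrics are computed once after the loop.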
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs = {'input_ids':      batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM and RoBERTa don't use segment_ids
                          'labels':         batch[3]}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]

                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)

        output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    return results


def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache

    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
            # HACK: label indices are swapped in the RoBERTa pretrained model
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
        features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=bool(args.model_type in ['xlnet']),            # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=bool(args.model_type in ['roberta']),           # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
            pad_on_left=bool(args.model_type in ['xlnet']),                 # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache

    # Convert to Tensors and build dataset
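    # The tensor order below must match the positional batch unpacking in train() and evaluate().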
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)

    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay to apply, if any.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X update steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X update steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']. "
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
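    # local_rank == -1 means non-distributed mode; with several visible GPUs,
    # torch.nn.DataParallel is used instead of distributed training.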
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)


    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)


    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)


    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
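            # Pick up every sub-directory containing saved weights (WEIGHTS_NAME, i.e. pytorch_model.bin).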
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    return results


if __name__ == "__main__":
    main()