# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT/XLNet/XLM finetuning runner for GLUE tasks."""

from __future__ import absolute_import, division, print_function

import argparse
import logging
import os
import random
from tqdm import tqdm, trange

import numpy as np

import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler

from tensorboardX import SummaryWriter

from pytorch_transformers import (BertForSequenceClassification, XLNetForSequenceClassification,
                                  XLMForSequenceClassification, BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
                                  XLNET_PRETRAINED_MODEL_ARCHIVE_MAP, XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from pytorch_transformers import (BertTokenizer, XLNetTokenizer,
                                  XLMTokenizer)
from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule

from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics


logger = logging.getLogger(__name__)

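# All pretrained model shortcut names known to the archive maps below; only used to build
# the --model_name help message.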
ALL_MODELS = sum((tuple(m.keys()) for m in (BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
                                            XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
                                            XLM_PRETRAINED_MODEL_ARCHIVE_MAP)), ())

MODEL_CLASSES = {
    'bert': BertForSequenceClassification,
    'xlnet': XLNetForSequenceClassification,
    'xlm': XLMForSequenceClassification,
}

TOKENIZER_CLASSES = {
    'bert': BertTokenizer,
    'xlnet': XLNetTokenizer,
    'xlm': XLMTokenizer,
}

def train(args, train_dataset, model):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer
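    # Group parameters so that weight decay is applied to all weights except biases and
    # LayerNorm parameters.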
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer, FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                                lr=args.learning_rate,
                                warmup=args.warmup_proportion,
                                t_total=num_train_optimization_steps)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Batch size = %d", args.train_batch_size)
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", num_train_optimization_steps)

    global_step = 0
    tr_loss = 0
    model.train()
    optimizer.zero_grad()
    for _ in trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]):
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
            batch = tuple(t.to(args.device) for t in batch)
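            # XLM does not use segment ids, so token_type_ids is only passed for BERT and XLNet.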
            inputs = {'input_ids':      batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
                      'labels':         batch[3]}
            outputs = model(**inputs)
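            # When labels are provided, the first element of the returned tuple is the loss.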
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

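            # In fp16 mode, apex's FP16_Optimizer wraps backward() so that loss scaling is applied.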
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()

            tr_loss += loss.item()
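            # Only update the parameters once every gradient_accumulation_steps batches.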
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    # modify learning rate with special warm up BERT uses
                    # if args.fp16 is False, BertAdam is used that handles this automatically
                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0]:
                    if not args.fp16:
                        tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', loss.item(), global_step)

    return global_step, tr_loss / global_step


def evaluate(args, eval_task, eval_output_dir, dataset, model):
    """ Evaluate the model """
    if os.path.exists(eval_output_dir) and os.listdir(eval_output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(eval_output_dir))
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    model.eval()
    eval_loss = 0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        input_ids, input_mask, segment_ids, label_ids = batch

        with torch.no_grad():
            inputs = {'input_ids':      input_ids,
                      'attention_mask': input_mask,
                      'token_type_ids': segment_ids if args.model_type in ['bert', 'xlnet'] else None,
                      'labels':         label_ids}
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]

        eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
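        # Accumulate logits and labels over all batches so metrics are computed on the full eval set.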
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = label_ids.detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, label_ids.detach().cpu().numpy(), axis=0)

    eval_loss = eval_loss / nb_eval_steps
    if args.output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        preds = np.squeeze(preds)
    result = compute_metrics(eval_task, preds, out_label_ids)

    output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))

    return result


def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
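    # The cache name encodes the split, model name, maximum sequence length and task so that
    # features built with different settings do not collide.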
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
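        # XLNet uses a different input layout: the [CLS] token at the end, padding on the left,
        # and dedicated segment ids for the [CLS] and padding tokens.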
        features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=bool(args.model_type in ['xlnet']),            # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            cls_token_segment_id=2 if args.model_type in ['xlnet'] else 1,
            pad_on_left=bool(args.model_type in ['xlnet']),                 # pad on the left for xlnet
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)

    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_name", default=None, type=str, required=True,
                        help="Bert/XLNet/XLM pre-trained model selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size", default=8, type=int,
                        help="Total batch size for eval.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training with linear learning rate warmup (0.1 = 10%% of training).")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")

    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, args.n_gpu, bool(args.local_rank != -1), args.fp16))

    # Setup seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

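    # The model type ('bert', 'xlnet' or 'xlm') is taken from the prefix of the model name,
    # e.g. 'bert-base-uncased' -> 'bert'.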
    args.model_type = args.model_name.lower().split('-')[0]
    tokenizer_class = TOKENIZER_CLASSES[args.model_type]
    model_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name, num_labels=num_labels)

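    # End of the barrier: the remaining processes waited while the first one downloaded the
    # model and vocabulary, and can now load them from the cache.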
    if args.local_rank == 0:
        torch.distributed.barrier()

    # Distributed, parallel and fp16 model
    if args.fp16:
        model.half()
    model.to(args.device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)


    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
            raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Handle MNLI double evaluation (matched and mismatched dev sets)
        eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
        eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)

        for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
            eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)

            result = evaluate(args, eval_task, eval_output_dir, eval_dataset, model)

        return result


if __name__ == "__main__":
    main()
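
# Example invocation (a minimal sketch; the data path, output path and model name below are
# illustrative assumptions, e.g. GLUE data downloaded to ./glue_data/MRPC):
#   python run_glue.py \
#       --data_dir ./glue_data/MRPC \
#       --model_name bert-base-uncased \
#       --task_name mrpc \
#       --output_dir ./output/mrpc \
#       --do_train --do_eval --do_lower_case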