# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import, division, print_function

import argparse
import csv
import logging
import os
import random
import sys

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Must be specified only for sequence-pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines


class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            text_b = line[4]
            label = line[0]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[8]
            text_b = line[9]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MnliMismatchedProcessor(MnliProcessor):
    """Processor for the MultiNLI Mismatched data set (GLUE version)."""

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
            "dev_matched")


class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[0]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


class StsbProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return [None]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[7]
            text_b = line[8]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class QqpProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            try:
                text_a = line[3]
                text_b = line[4]
                label = line[5]
            except IndexError:
                continue
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class QnliProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), 
            "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s."""

    label_map = {label : i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label]
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
    return features


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
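    # Example (illustrative): with max_length=5, tokens_a=["a", "b", "c", "d"]
    # and tokens_b=["e", "f"], one token is popped from tokens_a, leaving
    # ["a", "b", "c"] and ["e", "f"].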
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
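
# Example: accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0]))
# returns 2 -- a count of correct argmax predictions, not a ratio; the eval
# loop below divides the accumulated count by the number of examples.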

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
        "sst-2": Sst2Processor,
        "sts-b": StsbProcessor,
        "qqp": QqpProcessor,
        "qnli": QnliProcessor,
        "rte": RteProcessor,
        "wnli": WnliProcessor,
    }

    output_modes = {
        "cola": "classification",
        "mnli": "classification",
        "mrpc": "classification",
        "sst-2": "classification",
        "sts-b": "regression",
        "qqp": "classification",
        "qnli": "classification",
        "rte": "classification",
        "wnli": "classification",
    }

    # Number of output labels per task ("sts-b" is a regression task with a
    # single output). Covers every task registered in `processors` so the
    # lookup below cannot fail with a KeyError.
    num_labels_task = {
        "cola": 2,
        "mnli": 3,
        "mrpc": 2,
        "sst-2": 2,
        "sts-b": 1,
        "qqp": 2,
        "qnli": 2,
        "rte": 2,
        "wnli": 2,
    }

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
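    # e.g. with --train_batch_size 32 and --gradient_accumulation_steps 4, each
    # forward/backward pass sees 8 examples and optimizer.step() runs once per
    # 4 batches, for an effective batch size of 32.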

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
    model = BertForSequenceClassification.from_pretrained(args.bert_model,
                                                          cache_dir=cache_dir,
                                                          num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
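    # Biases and LayerNorm parameters (matched by the `no_decay` substrings)
    # are exempt from L2 weight decay, following the original BERT training
    # recipe.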
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
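        # RandomSampler shuffles within this single process; DistributedSampler
        # instead partitions the dataset so that each distributed rank trains on
        # a distinct shard.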
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
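                        # warmup_linear(x, warmup) ramps linearly from 0 to 1
                        # over the first `warmup` fraction of training, then
                        # decays linearly toward 0 as x approaches 1.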
                        lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    if args.do_train:
        # Save a trained model and the associated configuration
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())

        # Load a trained model and config that you have fine-tuned
        config = BertConfig(output_config_file)
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    else:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0

        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss/nb_tr_steps if args.do_train else None

        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'global_step': global_step,
                  'loss': loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == "__main__":
    main()