# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import, division, print_function

import argparse
import csv
import logging
import os
import random
import sys

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines


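# The processors below cover the GLUE tasks supported by this script. To add a
# new task, subclass DataProcessor, implement get_train_examples,
# get_dev_examples and get_labels, and register the class in the `processors`
# dict (and its label count in `num_labels_task`) inside main().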
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            text_b = line[4]
            label = line[0]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[8]
            text_b = line[9]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[0]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s."""

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
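        # For example, with max_seq_length=8 and five real tokens, input_mask
        # ends up as [1, 1, 1, 1, 1, 0, 0, 0] and the padded tail of input_ids
        # and segment_ids is all zeros.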

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label]
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
    return features


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
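    # For example, with max_length=6, len(tokens_a)=4 and len(tokens_b)=5,
    # tokens are popped one at a time from whichever list is currently longer
    # (ties pop from tokens_b) until 3 tokens remain in each.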
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

def accuracy(out, labels):
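    # Returns the number of correct predictions in the batch (a count, not a
    # ratio); main() divides the accumulated total by nb_eval_examples.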
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
        "sst-2": Sst2Processor,
    }

    num_labels_task = {
        "cola": 2,
        "sst-2": 2,
        "mnli": 3,
        "mrpc": 2,
    }

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
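    # The requested --train_batch_size is the effective batch size: each
    # forward/backward pass uses train_batch_size // gradient_accumulation_steps
    # examples, and gradients are accumulated before every optimizer step.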

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
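        # Total number of optimizer updates scheduled over all epochs; e.g.
        # 3,200 training examples with train_batch_size=32,
        # gradient_accumulation_steps=1 and num_train_epochs=3.0 give 100
        # updates per epoch, 300 in total.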
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank))
    model = BertForSequenceClassification.from_pretrained(args.bert_model,
                                                           cache_dir=cache_dir,
                                                           num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
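    # Parameters whose names match any of the substrings above (biases and
    # LayerNorm weights) are exempt from weight decay; all other parameters
    # use a weight decay of 0.01.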
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
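                # Step the optimizer (and advance the warmup schedule) only
                # once every gradient_accumulation_steps micro-batches.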
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used, which handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    if args.do_train:
        # Save a trained model and the associated configuration
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())

        # Load a trained model and config that you have fine-tuned
        config = BertConfig(output_config_file)
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    else:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0

        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
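                # Calling the model with label_ids returns the cross-entropy
                # loss; calling it without labels returns the classification logits.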
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss/nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'global_step': global_step,
                  'loss': loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == "__main__":
    main()