# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import, division, print_function

import argparse
import logging
import os
import sys
import random
from tqdm import tqdm, trange

import numpy as np

import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss, MSELoss

from tensorboardX import SummaryWriter

from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule

from run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics

if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle


logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--overwrite_output_dir',
                        action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    args.device = device

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

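    # Each forward/backward pass uses the requested batch size divided by the number of
    # accumulation steps; gradients are accumulated below, so the effective batch size is unchanged.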
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

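    # Seed every source of randomness so runs are reproducible.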
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier: the other processes can now load the downloaded model & vocab

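    # Cast the model to fp16 (if requested) before wrapping it for distributed or multi-GPU execution.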
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0

    if args.do_train:
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()

        # Prepare data loader
        train_examples = processor.get_train_examples(args.data_dir)
        cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(),
            str(args.max_seq_length),
            str(task_name)))
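        # Features are cached on disk keyed by model name, sequence length and task,
        # so repeated runs skip re-tokenization.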
        try:
            with open(cached_train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        except Exception:
            train_features = convert_examples_to_features(
                train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                logger.info("  Saving train features into cached file %s", cached_train_features_file)
                with open(cached_train_features_file, "wb") as writer:
                    pickle.dump(train_features, writer)

        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

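        # One optimizer update every gradient_accumulation_steps batches, over num_train_epochs passes.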
        num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        # Prepare optimizer

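        # Exclude biases and LayerNorm weights from weight decay, as in the original BERT training setup.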
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            # FusedAdam does not apply warmup itself; the schedule is applied manually in the training loop
            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                                 t_total=num_train_optimization_steps)

        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=num_train_optimization_steps)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch

                # forward pass: the model returns logits; the loss is computed below according to the output mode
                logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)

                if output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
                elif output_mode == "regression":
                    loss_fct = MSELoss()
                    loss = loss_fct(logits.view(-1), label_ids.view(-1))

                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # manually apply the warmup schedule BERT uses;
                        # when args.fp16 is False, BertAdam handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    if args.local_rank in [-1, 0]:
                        tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
                        tb_writer.add_scalar('loss', loss.item(), global_step)

    ### Saving best practices: if you use default names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)

        # Good practice: save your training arguments together with the trained model
        output_args_file = os.path.join(args.output_dir, 'training_args.bin')
        torch.save(args, output_args_file)
    else:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)

    model.to(device)

    ### Evaluation
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(),
            str(args.max_seq_length),
            str(task_name)))
        try:
            with open(cached_eval_features_file, "rb") as reader:
                eval_features = pickle.load(reader)
        except Exception:
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                logger.info("  Saving eval features into cached file %s", cached_eval_features_file)
                with open(cached_eval_features_file, "wb") as writer:
                    pickle.dump(eval_features, writer)


        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        if args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)  # Note that this sampler samples randomly
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss = 0
        nb_eval_steps = 0
        preds = []
        out_label_ids = None

        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)

            # compute the eval loss with the criterion matching the task's output mode
            if output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))

            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
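            # Accumulate logits and gold labels across batches for metric computation at the end.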
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
                out_label_ids = label_ids.detach().cpu().numpy()
            else:
                preds[0] = np.append(
                    preds[0], logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids, label_ids.detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        preds = preds[0]
        if output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(task_name, preds, out_label_ids)

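        # Average training loss per optimizer update (None when only evaluating).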
        loss = tr_loss/global_step if args.do_train else None

        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = loss

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        # MNLI also has a "mismatched" dev set (MNLI-MM); evaluate on it as well, writing results to <output_dir>-MM
        if task_name == "mnli":
            task_name = "mnli-mm"
            processor = processors[task_name]()

            if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:
                raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir + '-MM'))
            if not os.path.exists(args.output_dir + '-MM'):
                os.makedirs(args.output_dir + '-MM')

            eval_examples = processor.get_dev_examples(args.data_dir)
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            logger.info("***** Running evaluation *****")
            logger.info("  Num examples = %d", len(eval_examples))
            logger.info("  Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)

            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

            model.eval()
            eval_loss = 0
            nb_eval_steps = 0
            preds = []
            out_label_ids = None

            for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                label_ids = label_ids.to(device)

                with torch.no_grad():
                    logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=None)

                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))

                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if len(preds) == 0:
                    preds.append(logits.detach().cpu().numpy())
                    out_label_ids = label_ids.detach().cpu().numpy()
                else:
                    preds[0] = np.append(
                        preds[0], logits.detach().cpu().numpy(), axis=0)
                    out_label_ids = np.append(
                        out_label_ids, label_ids.detach().cpu().numpy(), axis=0)

            eval_loss = eval_loss / nb_eval_steps
            preds = preds[0]
            preds = np.argmax(preds, axis=1)
            result = compute_metrics(task_name, preds, out_label_ids)

            loss = tr_loss/global_step if args.do_train else None

            result['eval_loss'] = eval_loss
            result['global_step'] = global_step
            result['loss'] = loss

            output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt")
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == "__main__":
    main()