# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import, division, print_function

import argparse
import logging
import os
import pickle
import random
from tqdm import tqdm, trange

import numpy as np

import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler

from torch.nn import CrossEntropyLoss, MSELoss

from tensorboardX import SummaryWriter

from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule

from run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics

logger = logging.getLogger(__name__)

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

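    # Device setup: single-GPU/CPU when local_rank == -1 or --no_cuda is set, otherwise
    # one process per GPU using the NCCL distributed backend.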
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    # Prepare model
    model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
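        # Wrap the model for multi-process distributed training (one process per GPU).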
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_train:
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()

        # Prepare data loader
        train_examples = processor.get_train_examples(args.data_dir)
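        # Cache the tokenized training features on disk so repeated runs with the same
        # model, sequence length and task can skip convert_examples_to_features().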
        cached_train_features_file = args.data_dir + '_{0}_{1}_{2}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(),
            str(args.max_seq_length),
            str(task_name))
        try:
            with open(cached_train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        except Exception:
            train_features = convert_examples_to_features(
                train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                logger.info("  Saving train features into cached file %s", cached_train_features_file)
                with open(cached_train_features_file, "wb") as writer:
                    pickle.dump(train_features, writer)

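        # Pack the features into tensors; the label dtype depends on whether the task is
        # classification (long) or regression (float).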
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

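        # Total optimizer updates = (batches per epoch // accumulation steps) * epochs.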
        num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        # Prepare optimizer

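        # Apply weight decay to all parameters except biases and LayerNorm parameters.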
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
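        # With --fp16, apex's FusedAdam is wrapped in FP16_Optimizer and the warmup schedule
        # is stepped manually in the training loop; otherwise BertAdam applies it internally.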
        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                                 t_total=num_train_optimization_steps)

        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=num_train_optimization_steps)

        global_step = 0
        nb_tr_steps = 0
        tr_loss = 0

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

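        # Training loop with optional gradient accumulation and fp16 loss scaling.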
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch

                # define a new function to compute loss values for both output_modes
                logits = model(input_ids, segment_ids, input_mask, labels=None)

                if output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
                elif output_mode == "regression":
                    loss_fct = MSELoss()
                    loss = loss_fct(logits.view(-1), label_ids.view(-1))

                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
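                # Only update the weights every gradient_accumulation_steps mini-batches.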
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    if args.local_rank in [-1, 0]:
                        tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
                        tb_writer.add_scalar('loss', loss.item(), global_step)

    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
    else:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss = 0
        nb_eval_steps = 0
        preds = []

        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask, labels=None)

            # create eval loss and other metric required by the task
            if output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
            
            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
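            # Accumulate logits across batches; they are turned into predictions after the loop.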
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
            else:
                preds[0] = np.append(
                    preds[0], logits.detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        preds = preds[0]
        if output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(preds)
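        # compute_metrics (from run_classifier_dataset_utils) returns the metric(s) defined
        # for this task, typically accuracy, F1 or a correlation score.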
        result = compute_metrics(task_name, preds, all_label_ids.numpy())
        loss = tr_loss/global_step if args.do_train else None

        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = loss

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        # hack for MNLI-MM
        if task_name == "mnli":
            task_name = "mnli-mm"
            processor = processors[task_name]()
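            # MNLI also has a "mismatched" dev set; run a second eval pass and write its
            # results to <output_dir>-MM.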

            if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:
                raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir + '-MM'))
            if not os.path.exists(args.output_dir + '-MM'):
                os.makedirs(args.output_dir + '-MM')

            eval_examples = processor.get_dev_examples(args.data_dir)
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            logger.info("***** Running evaluation *****")
            logger.info("  Num examples = %d", len(eval_examples))
            logger.info("  Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)

            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

            model.eval()
            eval_loss = 0
            nb_eval_steps = 0
            preds = []

            for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                label_ids = label_ids.to(device)

                with torch.no_grad():
                    logits = model(input_ids, segment_ids, input_mask, labels=None)
            
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
            
                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if len(preds) == 0:
                    preds.append(logits.detach().cpu().numpy())
                else:
                    preds[0] = np.append(
                        preds[0], logits.detach().cpu().numpy(), axis=0)

            eval_loss = eval_loss / nb_eval_steps
            preds = preds[0]
            preds = np.argmax(preds, axis=1)
            result = compute_metrics(task_name, preds, all_label_ids.numpy())
            loss = tr_loss/global_step if args.do_train else None

            result['eval_loss'] = eval_loss
            result['global_step'] = global_step
            result['loss'] = loss

            output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt")
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == "__main__":
    main()