eval_transfo_xl.py 5.74 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model evaluation script.
    Adapted from https://github.com/kimiyoung/transformer-xl.
    In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py
"""
thomwolf's avatar
thomwolf committed
20
21
from __future__ import absolute_import, division, print_function, unicode_literals

22
23
24
import os
import functools
import argparse
thomwolf's avatar
thomwolf committed
25
import logging
26
27
import time
import math
thomwolf's avatar
thomwolf committed
28
29
import sys
from io import open
30
31
32
33
34

import torch

from pytorch_pretrained_bert import TransfoXLModel, TransfoXLCorpus

thomwolf's avatar
thomwolf committed
35
36
37
38
# Root logging configuration: INFO level, with timestamp and logger name
# on every record so eval progress lines are easy to trace.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
39
40
41
42
43
44


# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')

# (flag, keyword-arguments) pairs describing every CLI option of this script.
_CLI_OPTIONS = [
    ('--model_name', dict(type=str, default='transfo-xl-wt103',
                          help='pretrained model name')),
    ('--split', dict(type=str, default='test',
                     choices=['all', 'valid', 'test'],
                     help='which split to evaluate')),
    ('--batch_size', dict(type=int, default=10,
                          help='batch size')),
    ('--tgt_len', dict(type=int, default=128,
                       help='number of tokens to predict')),
    ('--ext_len', dict(type=int, default=0,
                       help='length of the extended context')),
    ('--mem_len', dict(type=int, default=1600,
                       help='length of the retained previous heads')),
    ('--clamp_len', dict(type=int, default=1000,
                         help='max positional embedding index')),
    ('--cuda', dict(action='store_true',
                    help='use CUDA')),
    ('--work_dir', dict(type=str, required=True,
                        help='path to the work_dir')),
    ('--no_log', dict(action='store_true',
                      help='do not log the eval result')),
    ('--same_length', dict(action='store_true',
                           help='set same length attention with masking')),
]
for _flag, _kwargs in _CLI_OPTIONS:
    parser.add_argument(_flag, **_kwargs)

args = parser.parse_args()
assert args.ext_len >= 0, 'extended context length must be non-negative'

# Evaluate on GPU when requested, otherwise on CPU.
device = torch.device("cuda" if args.cuda else "cpu")

# ---------------------------------------------------------------------------
# Data and model loading
# ---------------------------------------------------------------------------
# Fetch (or read from cache) the pretrained corpus and its vocabulary.
corpus = TransfoXLCorpus.from_pretrained(args.model_name)
ntokens = len(corpus.vocab)

# One segment iterator per split we may evaluate, built with identical
# batch/length settings.
va_iter, te_iter = (
    corpus.get_iterator(split, args.batch_size, args.tgt_len,
                        device=device, ext_len=args.ext_len)
    for split in ('valid', 'test')
)

# Load the pretrained Transformer-XL weights and move them to the device.
model = TransfoXLModel.from_pretrained(args.model_name).to(device)

thomwolf's avatar
thomwolf committed
93
# Announce the evaluation hyper-parameters before running.
logger.info(
    'Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(
        args.batch_size, args.tgt_len, args.ext_len, args.mem_len,
        args.clamp_len))

# Apply the requested segment/context/memory lengths to the model.
model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
if args.clamp_len > 0:
    # Cap the positional embedding index (see --clamp_len help text).
    model.clamp_len = args.clamp_len
if args.same_length:
    # Enable same-length attention masking (see --same_length help text).
    model.same_length = True

###############################################################################
# Evaluation code
###############################################################################
def evaluate(eval_iter):
    """Evaluate the model on ``eval_iter`` and return the mean per-token loss.

    Args:
        eval_iter: iterator yielding ``(data, target, seq_len)`` segment
            batches (as produced by ``corpus.get_iterator``).

    Returns:
        float: total loss divided by the total number of evaluated tokens.

    Raises:
        ValueError: if ``eval_iter`` yields no batches. (The original code
            dereferenced an undefined loop variable and divided by zero in
            that case.)
    """
    # Evaluation mode disables dropout.
    model.eval()
    total_len, total_loss = 0, 0.
    n_segments = 0
    start_time = time.time()
    with torch.no_grad():
        # Memory states are threaded from one segment to the next.
        mems = tuple()
        for data, target, seq_len in eval_iter:
            ret = model(data, target, *mems)
            loss, mems = ret[0], ret[1:]
            loss = loss.mean()
            # Weight each segment's mean loss by the segment length so the
            # final average is per token, not per segment.
            total_loss += seq_len * loss.item()
            total_len += seq_len
            n_segments += 1
        total_time = time.time() - start_time
    if n_segments == 0:
        # Explicit guard instead of the original NameError/ZeroDivisionError.
        raise ValueError('eval_iter yielded no batches')
    logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format(
            total_time, 1000 * total_time / n_segments))
    return total_loss / total_len

# Run the requested evaluation split(s); splits that were not requested
# keep a loss of None and are skipped in the final report.
test_loss = None
valid_loss = None
if args.split in ('all', 'test'):
    test_loss = evaluate(te_iter)
if args.split in ('all', 'valid'):
    valid_loss = evaluate(va_iter)

def format_log(loss, split):
    """Render one split's loss and perplexity as a log-line fragment.

    Args:
        loss: average per-token loss for the split.
        split: split name used to label the fragment (e.g. 'valid', 'test').

    Returns:
        str: formatted '| <split> loss ... | <split> ppl ... ' fragment.
    """
    return '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(
        split, loss, math.exp(loss))

# Assemble the final summary line from whichever splits were evaluated,
# then print it between two banner rules.
log_str = ''.join(
    format_log(loss, name)
    for loss, name in ((valid_loss, 'valid'), (test_loss, 'test'))
    if loss is not None
)

logger.info('=' * 100)
logger.info(log_str)
logger.info('=' * 100)