# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
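"""Tests for the PyTorch BERT configuration and model classes.

Each task head (masked LM, next-sentence prediction, pre-training, question
answering, sequence classification, token classification) is instantiated
from a small random config and checked for the expected output shapes and a
scalar loss. Run directly (``python modeling_test.py``) or through any
unittest-compatible runner.
"""
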
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import tempfile

import torch

from pytorch_pretrained_bert import (BertConfig, BertModel, BertForMaskedLM,
                                     BertForNextSentencePrediction, BertForPreTraining,
                                     BertForQuestionAnswering, BertForSequenceClassification,
                                     BertForTokenClassification)


class BertModelTest(unittest.TestCase):
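    """Shape and loss checks for BertModel and each of its task-specific heads."""
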
    class BertModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     seq_length=7,
                     is_training=True,
                     use_input_mask=True,
                     use_token_type_ids=True,
                     use_labels=True,
                     vocab_size=99,
                     hidden_size=32,
                     num_hidden_layers=5,
                     num_attention_heads=4,
                     intermediate_size=37,
                     hidden_act="gelu",
                     hidden_dropout_prob=0.1,
                     attention_probs_dropout_prob=0.1,
                     max_position_embeddings=512,
                     type_vocab_size=16,
                     type_sequence_label_size=2,
                     initializer_range=0.02,
                     num_labels=3,
                     scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.scope = scope

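        # Build a small random BertConfig plus matching input tensors; every
        # create_bert_* method below consumes this same tuple.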
        def prepare_config_and_inputs(self):
            input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = BertModelTest.ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

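            # Optional labels: one per sequence for classification-style heads,
            # one per token for masked LM and token classification.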
            sequence_labels = None
            token_labels = None
            if self.use_labels:
                sequence_labels = BertModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.num_labels)

            config = BertConfig(
                vocab_size_or_config_json_file=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range)

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels

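        # Each head returns its loss as a scalar (zero-dimensional) tensor
        # when labels are supplied, so its size() must be the empty list.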
        def check_loss_output(self, result):
            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])

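        # Base model: the forward pass returns the hidden states of every
        # layer plus the pooled output of the first ([CLS]) token.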
        def create_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertModel(config=config)
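            # eval() disables dropout so the forward passes are deterministic.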
            model.eval()
            all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "sequence_output": all_encoder_layers[-1],
                "pooled_output": pooled_output,
                "all_encoder_layers": all_encoder_layers,
            }
            return outputs

        def check_bert_model_output(self, result):
            self.parent.assertListEqual(
                [size for layer in result["all_encoder_layers"] for size in layer.size()],
                [self.batch_size, self.seq_length, self.hidden_size] * self.num_hidden_layers)
            self.parent.assertListEqual(
                list(result["sequence_output"].size()),
                [self.batch_size, self.seq_length, self.hidden_size])
            self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])


        def create_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForMaskedLM(config=config)
            model.eval()
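            # With token_labels the model returns the masked-LM loss; without
            # labels it returns per-token scores over the vocabulary.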
            loss = model(input_ids, token_type_ids, input_mask, token_labels)
            prediction_scores = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "prediction_scores": prediction_scores,
            }
            return outputs

        def check_bert_for_masked_lm_output(self, result):
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])

        def create_bert_for_next_sentence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForNextSentencePrediction(config=config)
            model.eval()
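            # sequence_labels doubles as the binary next-sentence label; only
            # its shape and value range matter for this test.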
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
            seq_relationship_score = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "seq_relationship_score": seq_relationship_score,
            }
            return outputs

        def check_bert_for_next_sentence_prediction_output(self, result):
            self.parent.assertListEqual(
                list(result["seq_relationship_score"].size()),
                [self.batch_size, 2])


        def create_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForPreTraining(config=config)
            model.eval()
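            # The pre-training head combines the masked-LM loss (token_labels)
            # with the next-sentence loss (sequence_labels).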
            loss = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels)
            prediction_scores, seq_relationship_score = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "prediction_scores": prediction_scores,
                "seq_relationship_score": seq_relationship_score,
            }
            return outputs

        def check_bert_for_pretraining_output(self, result):
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(
                list(result["seq_relationship_score"].size()),
                [self.batch_size, 2])


        def create_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForQuestionAnswering(config=config)
            model.eval()
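            # sequence_labels is reused as both the start and end positions;
            # the QA head just needs two [batch_size] position tensors.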
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels)
            start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "start_logits": start_logits,
                "end_logits": end_logits,
            }
            return outputs

        def check_bert_for_question_answering_output(self, result):
            self.parent.assertListEqual(
                list(result["start_logits"].size()),
                [self.batch_size, self.seq_length])
            self.parent.assertListEqual(
                list(result["end_logits"].size()),
                [self.batch_size, self.seq_length])


        def create_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForSequenceClassification(config=config, num_labels=self.num_labels)
            model.eval()
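            # Sequence classification: num_labels-way logits over the pooled output.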
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
            logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "logits": logits,
            }
            return outputs

        def check_bert_for_sequence_classification_output(self, result):
            self.parent.assertListEqual(
                list(result["logits"].size()),
                [self.batch_size, self.num_labels])


        def create_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForTokenClassification(config=config, num_labels=self.num_labels)
            model.eval()
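            # Token classification: num_labels-way logits at every token position.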
            loss = model(input_ids, token_type_ids, input_mask, token_labels)
            logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "logits": logits,
            }
            return outputs

        def check_bert_for_token_classification_output(self, result):
            self.parent.assertListEqual(
                list(result["logits"].size()),
                [self.batch_size, self.seq_length, self.num_labels])


    def test_default(self):
        self.run_tester(BertModelTest.BertModelTester(self))

    def test_config_to_json_string(self):
        config = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
        obj = json.loads(config.to_json_string())
        self.assertEqual(obj["vocab_size"], 99)
        self.assertEqual(obj["hidden_size"], 37)

    def test_config_to_json_file(self):
        config_first = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
        # Use a real temporary file rather than a hard-coded /tmp path so the
        # test is portable across platforms.
        fd, json_file_path = tempfile.mkstemp(suffix=".json")
        os.close(fd)
        config_first.to_json_file(json_file_path)
        config_second = BertConfig.from_json_file(json_file_path)
        os.remove(json_file_path)
        self.assertEqual(config_second.to_dict(), config_first.to_dict())

    def run_tester(self, tester):
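        # Run every head on the same synthetic config and inputs, checking
        # output shapes and that each supervised loss is a scalar.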
        config_and_inputs = tester.prepare_config_and_inputs()
        output_result = tester.create_bert_model(*config_and_inputs)
        tester.check_bert_model_output(output_result)

        output_result = tester.create_bert_for_masked_lm(*config_and_inputs)
        tester.check_bert_for_masked_lm_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_next_sentence_prediction(*config_and_inputs)
        tester.check_bert_for_next_sentence_prediction_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_pretraining(*config_and_inputs)
        tester.check_bert_for_pretraining_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_question_answering(*config_and_inputs)
        tester.check_bert_for_question_answering_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_sequence_classification(*config_and_inputs)
        tester.check_bert_for_sequence_classification_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_token_classification(*config_and_inputs)
        tester.check_bert_for_token_classification_output(output_result)
        tester.check_loss_output(output_result)

    @classmethod
    def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
        """Creates a random int32 tensor of the shape within the vocab size."""
        if rng is None:
            rng = random.Random()

        total_dims = 1
        for dim in shape:
            total_dims *= dim

        values = []
        for _ in range(total_dims):
            values.append(rng.randint(0, vocab_size - 1))

        return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()


if __name__ == "__main__":
    unittest.main()