# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
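"""Shape and configuration tests for the BERT model classes in pytorch_pretrained_bert."""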
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest
import json
import random

import torch

from pytorch_pretrained_bert import (BertConfig, BertModel, BertForMaskedLM,
                                     BertForNextSentencePrediction, BertForPreTraining,
                                     BertForQuestionAnswering, BertForSequenceClassification,
                                     BertForTokenClassification)


class BertModelTest(unittest.TestCase):
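    # Helper that builds a tiny random BertConfig plus matching input tensors
    # and shape-checks the outputs of each model head.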
    class BertModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     seq_length=7,
                     is_training=True,
                     use_input_mask=True,
                     use_token_type_ids=True,
                     use_labels=True,
                     vocab_size=99,
                     hidden_size=32,
                     num_hidden_layers=5,
                     num_attention_heads=4,
                     intermediate_size=37,
                     hidden_act="gelu",
                     hidden_dropout_prob=0.1,
                     attention_probs_dropout_prob=0.1,
                     max_position_embeddings=512,
                     type_vocab_size=16,
                     type_sequence_label_size=2,
                     initializer_range=0.02,
                     num_labels=3,
                     scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.scope = scope

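        # Build random input tensors and a small config sized to match them.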
        def prepare_config_and_inputs(self):
            input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = BertModelTest.ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            if self.use_labels:
                sequence_labels = BertModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.num_labels)

            config = BertConfig(
                vocab_size_or_config_json_file=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range)

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels

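        # When labels are supplied, every head returns a scalar (0-dim) loss.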
        def check_loss_output(self, result):
            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])

        def create_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertModel(config=config)
            model.eval()  # disable dropout so outputs are deterministic
            all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "sequence_output": all_encoder_layers[-1],
                "pooled_output": pooled_output,
                "all_encoder_layers": all_encoder_layers,
            }
            return outputs

        def check_bert_model_output(self, result):
            self.parent.assertListEqual(
                [size for layer in result["all_encoder_layers"] for size in layer.size()],
                [self.batch_size, self.seq_length, self.hidden_size] * self.num_hidden_layers)
            self.parent.assertListEqual(
                list(result["sequence_output"].size()),
                [self.batch_size, self.seq_length, self.hidden_size])
            self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])


        def create_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForMaskedLM(config=config)
            model.eval()
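            # Called with labels, the model returns the loss; without labels,
            # the prediction scores. The heads below follow the same convention.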
            loss = model(input_ids, token_type_ids, input_mask, token_labels)
            prediction_scores = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "prediction_scores": prediction_scores,
            }
            return outputs

        def check_bert_for_masked_lm_output(self, result):
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])

        def create_bert_for_next_sentence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForNextSentencePrediction(config=config)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
            seq_relationship_score = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "seq_relationship_score": seq_relationship_score,
            }
            return outputs

        def check_bert_for_next_sentence_prediction_output(self, result):
            self.parent.assertListEqual(
                list(result["seq_relationship_score"].size()),
                [self.batch_size, 2])


        def create_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForPreTraining(config=config)
            model.eval()
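            # Pre-training combines the masked-LM loss (token_labels) and the
            # next-sentence loss (sequence_labels).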
            loss = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels)
            prediction_scores, seq_relationship_score = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "prediction_scores": prediction_scores,
                "seq_relationship_score": seq_relationship_score,
            }
            return outputs

        def check_bert_for_pretraining_output(self, result):
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(
                list(result["seq_relationship_score"].size()),
                [self.batch_size, 2])


        def create_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForQuestionAnswering(config=config)
            model.eval()
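            # sequence_labels stands in for both the start and end positions of the answer span.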
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels)
            start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "start_logits": start_logits,
                "end_logits": end_logits,
            }
            return outputs

        def check_bert_for_question_answering_output(self, result):
            self.parent.assertListEqual(
                list(result["start_logits"].size()),
                [self.batch_size, self.seq_length])
            self.parent.assertListEqual(
                list(result["end_logits"].size()),
                [self.batch_size, self.seq_length])


        def create_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForSequenceClassification(config=config, num_labels=self.num_labels)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
            logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "logits": logits,
            }
            return outputs

        def check_bert_for_sequence_classification_output(self, result):
            self.parent.assertListEqual(
                list(result["logits"].size()),
                [self.batch_size, self.num_labels])


        def create_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForTokenClassification(config=config, num_labels=self.num_labels)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, token_labels)
            logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "loss": loss,
                "logits": logits,
            }
            return outputs

        def check_bert_for_token_classification_output(self, result):
            self.parent.assertListEqual(
                list(result["logits"].size()),
                [self.batch_size, self.seq_length, self.num_labels])


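    # Entry point: exercise every head with the default tester settings.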
    def test_default(self):
        self.run_tester(BertModelTest.BertModelTester(self))

    def test_config_to_json_string(self):
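        # Round-trip the config through its JSON representation.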
        config = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
        obj = json.loads(config.to_json_string())
        self.assertEqual(obj["vocab_size"], 99)
        self.assertEqual(obj["hidden_size"], 37)

    def run_tester(self, tester):
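        # Build one shared set of config and inputs, then run each model head
        # through its create/check pair.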
        config_and_inputs = tester.prepare_config_and_inputs()
        output_result = tester.create_bert_model(*config_and_inputs)
        tester.check_bert_model_output(output_result)

        output_result = tester.create_bert_for_masked_lm(*config_and_inputs)
        tester.check_bert_for_masked_lm_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_next_sentence_prediction(*config_and_inputs)
        tester.check_bert_for_next_sentence_prediction_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_pretraining(*config_and_inputs)
        tester.check_bert_for_pretraining_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_question_answering(*config_and_inputs)
        tester.check_bert_for_question_answering_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_sequence_classification(*config_and_inputs)
        tester.check_bert_for_sequence_classification_output(output_result)
        tester.check_loss_output(output_result)

        output_result = tester.create_bert_for_token_classification(*config_and_inputs)
        tester.check_bert_for_token_classification_output(output_result)
        tester.check_loss_output(output_result)

    @classmethod
    def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
        """Creates a random int32 tensor of the shape within the vocab size."""
        if rng is None:
            rng = random.Random()

        total_dims = 1
        for dim in shape:
            total_dims *= dim

        values = []
        for _ in range(total_dims):
            values.append(rng.randint(0, vocab_size - 1))

        return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()


if __name__ == "__main__":
    unittest.main()