# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import is_torch_available

from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import require_torch, torch_device


if is_torch_available():
    from transformers import (
        DistilBertConfig,
        DistilBertModel,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForTokenClassification,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
    )

    class DistilBertModelTester(object):
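        # Helper that builds a small random DistilBertConfig plus matching inputs/labels
        # and runs shape and loss checks for each DistilBert head tested below.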
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=False,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            num_hidden_layers=5,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope

        def prepare_config_and_inputs(self):
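            # Build random input ids, an optional 0/1 attention mask, label tensors,
            # and a small DistilBertConfig shared by all checks below.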
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = DistilBertConfig(
                vocab_size=self.vocab_size,
                dim=self.hidden_size,
                n_layers=self.num_hidden_layers,
                n_heads=self.num_attention_heads,
                hidden_dim=self.intermediate_size,
                hidden_act=self.hidden_act,
                dropout=self.hidden_dropout_prob,
                attention_dropout=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                initializer_range=self.initializer_range,
            )

            return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

        def check_loss_output(self, result):
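            # The loss returned by the model should be a scalar, i.e. a 0-dimensional tensor.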
            self.parent.assertListEqual(list(result["loss"].size()), [])

        def create_and_check_distilbert_model(
            self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
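            # Forward the base model both with and without an attention mask; the hidden
            # states should have shape (batch_size, seq_length, hidden_size).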
            model = DistilBertModel(config=config)
            model.to(torch_device)
            model.eval()
            (sequence_output,) = model(input_ids, input_mask)
            (sequence_output,) = model(input_ids)

            result = {
                "sequence_output": sequence_output,
            }
            self.parent.assertListEqual(
                list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
            )

        def create_and_check_distilbert_for_masked_lm(
            self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
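            # The MLM head should return a loss and per-token prediction scores over the vocabulary.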
            model = DistilBertForMaskedLM(config=config)
            model.to(torch_device)
            model.eval()
            loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=token_labels)
            result = {
                "loss": loss,
                "prediction_scores": prediction_scores,
            }
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
            )
            self.check_loss_output(result)

        def create_and_check_distilbert_for_question_answering(
            self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
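            # The QA head should return a loss plus start/end logits of shape (batch_size, seq_length).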
            model = DistilBertForQuestionAnswering(config=config)
            model.to(torch_device)
            model.eval()
            loss, start_logits, end_logits = model(
                input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
            )
            result = {
                "loss": loss,
                "start_logits": start_logits,
                "end_logits": end_logits,
            }
            self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
            self.check_loss_output(result)

        def create_and_check_distilbert_for_sequence_classification(
            self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
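            # The sequence classification head should return a loss and logits of shape (batch_size, num_labels).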
            config.num_labels = self.num_labels
            model = DistilBertForSequenceClassification(config)
            model.to(torch_device)
            model.eval()
            loss, logits = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
            result = {
                "loss": loss,
                "logits": logits,
            }
            self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_labels])
            self.check_loss_output(result)

        def create_and_check_distilbert_for_token_classification(
            self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
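            # The token classification head should return a loss and per-token logits
            # of shape (batch_size, seq_length, num_labels).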
            config.num_labels = self.num_labels
            model = DistilBertForTokenClassification(config=config)
            model.to(torch_device)
            model.eval()

            loss, logits = model(input_ids, attention_mask=input_mask, labels=token_labels)
            result = {
                "loss": loss,
                "logits": logits,
            }
            self.parent.assertListEqual(
                list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels]
            )
            self.check_loss_output(result)

        def create_and_check_distilbert_for_multiple_choice(
            self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
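            # Inputs are expanded to (batch_size, num_choices, seq_length) so each example carries
            # num_choices candidates; the head should return a loss and logits of shape (batch_size, num_choices).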
            config.num_choices = self.num_choices
            model = DistilBertForMultipleChoice(config=config)
            model.to(torch_device)
            model.eval()
            multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            loss, logits = model(
                multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
            )
            result = {
                "loss": loss,
                "logits": logits,
            }
            self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
            self.check_loss_output(result)

        def prepare_config_and_inputs_for_common(self):
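            # Repackage the inputs as the dict expected by the shared ModelTesterMixin tests.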
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
            return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
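    # Test-suite entry point: runs the shared ModelTesterMixin checks plus the
    # DistilBert-specific checks defined by DistilBertModelTester above.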

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    test_pruning = True
    test_torchscript = True
    test_resize_embeddings = True
    test_head_masking = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    # @slow
    # def test_model_from_pretrained(self):
    #     for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    #         model = DistilBertModel.from_pretrained(model_name)
    #         self.assertIsNotNone(model)
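

# A minimal way to run only this file locally (assuming the usual transformers test layout), e.g.:
#   python -m pytest -sv ./tests/test_modeling_distilbert.py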