# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


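# Helper that builds a tiny AlbertConfig and matching dummy inputs (ids, masks, labels)
# used by the model checks in AlbertModelTest below.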
class AlbertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.embedding_size = 16
        self.hidden_size = 36
        self.num_hidden_layers = 6
        self.num_hidden_groups = 6
        self.num_attention_heads = 6
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
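        # Repack the prepared config and inputs into the (config, inputs_dict) form
        # expected by the shared ModelTesterMixin tests.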
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, unittest.TestCase):

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    fx_ready_model_classes = all_model_classes
    fx_dynamic_ready_model_classes = all_model_classes

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
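        # Run the pretrained base model on a fixed input and compare a small slice of the
        # hidden states against reference values.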
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))