# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        BertConfig,
        BertForMaskedLM,
        BertForMultipleChoice,
        BertForNextSentencePrediction,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertForTokenClassification,
        BertLMHeadModel,
        BertModel,
    )
    from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST


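# Builds a deliberately tiny random BertConfig plus matching dummy inputs, and provides one
# create_and_check_* helper per BERT head that runs a forward pass and verifies output shapes.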
class BertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
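        # reuse the common config and inputs, then add encoder hidden states and an encoder
        # attention mask so the model can be exercised as a decoder with cross-attention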
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = BertModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BertLMHeadModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_model_for_causal_lm_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = BertLMHeadModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

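    # Checks that incremental decoding with past_key_values matches a full forward pass:
    # the cached call is fed only the new tokens, and a random hidden-state slice is compared.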
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertLMHeadModel(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create a few hypothetical next tokens (and their mask) to extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append the new tokens to input_ids and the new mask to input_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

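    # BertForPreTraining combines the masked-LM and next-sentence heads, so both output shapes are checked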
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = BertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = BertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = BertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
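        # tile each tensor along a new choices dimension: (batch, seq) -> (batch, num_choices, seq)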
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):

    all_model_classes = (
        (
            BertModel,
            BertLMHeadModel,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else ()

    # special case for the ForPreTraining model, which needs both masked-LM labels and a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = BertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

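    # BertConfig.position_embedding_type supports "absolute", "relative_key", and "relative_key_query";
    # rerun the basic model checks with each variant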
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # Regression test: running the decoder with the default (None) attention mask used to fail on PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


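# The integration tests below load real pretrained checkpoints, so they are marked @slow and
# only run when RUN_SLOW=1 is set in the environment.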
@require_torch
class BertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertModel.from_pretrained("bert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]])

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head_relative_embedding_key(self):
        model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0756, 0.3142, -0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head_relative_embedding_key_query(self):
        model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))