# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest

from transformers import BertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        BertForMaskedLM,
        BertForMultipleChoice,
        BertForNextSentencePrediction,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertForTokenClassification,
        BertLMHeadModel,
        BertModel,
    )
    from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST


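# Shared helper for the BERT test suites below: builds a tiny random BertConfig
# together with matching random input tensors. Each create_and_check_* method
# instantiates one BERT head on `torch_device` and asserts the shapes of its
# outputs.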
class BertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

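    # Draws fresh random tensors on every call; all shapes are derived from the
    # sizes configured in __init__ (batch_size, seq_length, vocab_size, ...).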
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """
        Returns a tiny configuration by default.
        """
        return BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = BertModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BertLMHeadModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_model_for_causal_lm_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = BertLMHeadModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

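    # Checks that incremental decoding with cached past_key_values produces the
    # same hidden states as re-running the full extended sequence from scratch.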
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertLMHeadModel(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical next tokens (and matching mask) to extend the inputs with
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append the new tokens to input_ids and the new mask to input_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = BertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = BertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

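    # BertForMultipleChoice expects inputs of shape (batch_size, num_choices,
    # seq_length), so every tensor is expanded along a new num_choices axis.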
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = BertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_input_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


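# Most of the coverage comes from the inherited ModelTesterMixin /
# GenerationTesterMixin machinery; the test_* methods below mainly feed it
# fixtures produced by BertModelTester.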
@require_torch
class BertModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):

    all_model_classes = (
        (
            BertModel,
            BertLMHeadModel,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else ()
    fx_ready_model_classes = all_model_classes
    fx_dynamic_ready_model_classes = all_model_classes
    test_sequence_classification_problem_types = True

    # special case for the ForPreTraining model, which needs both MLM labels and a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = BertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:

            # BertForMultipleChoice behaves incorrectly in JIT environments,
            # so skip it and keep testing the remaining model classes.
            if model_class == BertForMultipleChoice:
                continue

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


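# Slow integration tests: these download real pretrained checkpoints and
# compare a 3x3 slice of the last hidden state against hard-coded reference
# values (atol=1e-4).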
@require_torch
class BertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertModel.from_pretrained("bert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]])

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head_relative_embedding_key(self):
        model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0756, 0.3142, -0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head_relative_embedding_key_query(self):
        model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))