# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect
import os.path
import random
import tempfile
import unittest
from typing import List, Tuple

from transformers import is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device


if is_torch_available():
    import numpy as np
    import torch

    from transformers import (
        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        MODEL_FOR_CAUSAL_LM_MAPPING,
        MODEL_FOR_MASKED_LM_MAPPING,
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        AdaptiveEmbedding,
        BertConfig,
        BertModel,
        PretrainedConfig,
        PreTrainedModel,
    )


def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key or "initializer_factor" in key:
            setattr(configs_no_init, key, 1e-10)
    return configs_no_init


@require_torch
class ModelTesterMixin:

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_torchscript = True
    test_pruning = True
    test_resize_embeddings = True
    test_head_masking = True
    test_missing_keys = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values(),
                *MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values(),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
                *MODEL_FOR_CAUSAL_LM_MAPPING.values(),
                *MODEL_FOR_MASKED_LM_MAPPING.values(),
                *MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_save_load(self):
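        # Round-trip the model through save_pretrained/from_pretrained and check the outputs stay within 1e-5.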
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_keys_to_never_save(self):
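        # Weights listed in `keys_to_never_save` must be in the state_dict but must not end up in the saved file.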
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            keys_to_never_save = getattr(model, "keys_to_never_save", None)
            if keys_to_never_save is None:
                continue

            # check the keys are in the original state_dict
            for k in keys_to_never_save:
                self.assertIn(k, model.state_dict())

            # check that certain keys didn't get saved with the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME)
                state_dict_saved = torch.load(output_model_file)
                for k in keys_to_never_save:
                    self.assertNotIn(k, state_dict_saved)

    def test_initialization(self):
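        # With init ranges/stds forced to ~0 via _config_zero_init, every trainable parameter's mean should round to 0.0 or 1.0.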
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg="Parameter {} of model {} seems not properly initialized".format(name, model_class),
                    )

    def test_determinism(self):
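        # Two forward passes over the same inputs must produce identical (non-nan) outputs.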
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_forward_signature(self):
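        # Check that forward() exposes the expected argument names in the expected order.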
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                    "encoder_outputs",
                ]
                self.assertListEqual(arg_names[:5], expected_arg_names)
            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_training(self):
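        # A forward pass with labels followed by loss.backward() must work for every model class with a head.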
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in MODEL_MAPPING.values():
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training or not hasattr(config, "gradient_checkpointing"):
            return

        config.gradient_checkpointing = True
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in MODEL_MAPPING.values():
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_attention_outputs(self):
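        # Check the number and shape of the attention tensors returned with output_attentions=True.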
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = 5

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                # Question Answering model returns start_logits and end_logits
                if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                    correct_outlen += 1  # start_logits and end_logits instead of only 1 output

                self.assertEqual(out_len, correct_outlen)

                # decoder attentions
                decoder_attentions = outputs.decoder_attentions
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

                # cross attentions
                cross_attentions = outputs.cross_attentions
                self.assertIsInstance(cross_attentions, (list, tuple))
                self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(cross_attentions[0].shape[-3:]),
                    [
                        self.model_tester.num_attention_heads,
                        decoder_seq_length,
                        encoder_key_length,
                    ],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            if chunk_length is not None:
                self.assertListEqual(
                    list(self_attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_torchscript(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torchscript(config, inputs_dict)

    def test_torchscript_output_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_attentions = True
        self._create_and_check_torchscript(config, inputs_dict)

    def test_torchscript_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        self._create_and_check_torchscript(config, inputs_dict)

    def _create_and_check_torchscript(self, config, inputs_dict):
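        # Trace the model with torch.jit.trace, save and reload the traced module, and check the weights match.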
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)

            try:
                if model.config.is_encoder_decoder:
                    model.config.use_cache = False  # TODO: this should be deleted after bug #7474 is solved
                    input_ids = inputs["input_ids"]
                    attention_mask = inputs["attention_mask"]
                    decoder_input_ids = inputs["decoder_input_ids"]
                    decoder_attention_mask = inputs["decoder_attention_mask"]

                    traced_model = torch.jit.trace(
                        model, (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask)
                    )
                else:
                    input_ids = inputs["input_ids"]
                    traced_model = torch.jit.trace(model, input_ids)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_headmasking(self):
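        # Mask selected attention heads and check that they produce (near) zero attention weights while gradients still reach the head mask.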
        if not self.test_head_masking:
            return

        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask

            outputs = model(**inputs, return_dict=True)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            attentions = outputs[-1]

            # Remove Nan
            for t in attentions:
                self.assertLess(
                    torch.sum(torch.isnan(t)), t.numel() / 4
                )  # Check we don't have more than 25% nans (arbitrary)
            attentions = [
                t.masked_fill(torch.isnan(t), 0.0) for t in attentions
            ]  # remove them (the test is less complete)

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
            self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
            self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)

    def test_head_pruning(self):
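        # Prune heads with model.prune_heads and check that the attention tensors shrink accordingly.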
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_pretrained(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_config_init(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_integration(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {0: [0], 1: [1, 2]}
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            heads_to_prune = {0: [0], 2: [1, 2]}
            model.prune_heads(heads_to_prune)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})

    def test_hidden_states_output(self):
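        # Check the number and shape of hidden states, both via the forward kwarg and via the config flag.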
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs["hidden_states"] if "hidden_states" in outputs else outputs[-1]

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_feed_forward_chunking(self):
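        # Outputs computed with chunk_size_feed_forward=1 must match the un-chunked outputs.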
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            torch.manual_seed(0)
            config.chunk_size_feed_forward = 1
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

    def test_resize_tokens_embeddings(self):
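        # Grow and shrink the token embedding matrix and check the config, the new shapes, and that surviving rows are unchanged.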
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_model_common_attributes(self):
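        # Every model must expose settable input embeddings and (possibly None) output embeddings.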
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))
            model.set_input_embeddings(torch.nn.Embedding(10, 10))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, torch.nn.Linear))

    def test_correct_missing_keys(self):
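        # Loading a base-model checkpoint into a model with a head must report the head weights as missing keys.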
        if not self.test_missing_keys:
            return
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            base_model_prefix = model.base_model_prefix

            if hasattr(model, base_model_prefix):
                with tempfile.TemporaryDirectory() as temp_dir_name:
                    model.base_model.save_pretrained(temp_dir_name)
                    model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)

                    with self.subTest(msg="Missing keys for {}".format(model.__class__.__name__)):
                        self.assertGreater(len(loading_info["missing_keys"]), 0)

    def test_tie_model_weights(self):
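        # Check that weight tying survives a vocabulary resize: the parameter count must not change.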
        if not self.test_torchscript:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_same_values(layer_1, layer_2):
            equal = True
            for p1, p2 in zip(layer_1.weight, layer_2.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    equal = False
            return equal

        for model_class in self.all_model_classes:
            config.torchscript = True
            model_not_tied = model_class(config)
            if model_not_tied.get_output_embeddings() is None:
                continue

            config_tied = copy.deepcopy(config)
            config_tied.torchscript = False
            model_tied = model_class(config_tied)
            params_tied = list(model_tied.parameters())
            # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # embeddings.weight.data.div_(2)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # decoding.weight.data.div_(4)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after resize they remain tied.
            model_tied.resize_token_embeddings(config.vocab_size + 10)
            params_tied_2 = list(model_tied.parameters())
            self.assertEqual(len(params_tied_2), len(params_tied))

            # decoding.weight.data.mul_(20)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
            # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))

    def test_model_outputs_equivalence(self):
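        # Tuple outputs (return_dict=False) and ModelOutput outputs (return_dict=True) must contain the same tensors.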

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.",
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_inputs_embeds(self):
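        # The model must accept pre-computed embeddings via inputs_embeds in place of input_ids.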

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_multi_gpu
    def test_multi_gpu_data_parallel_forward(self):
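        # Smoke test: a forward pass must work when the model is wrapped in torch.nn.DataParallel.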
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # some params shouldn't be scattered by nn.DataParallel
        # so just remove them if they are present.
        blacklist_non_batched_params = ["head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to cuda:0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = torch.nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))


global_rng = random.Random()


def ids_tensor(shape, vocab_size, rng=None, name=None):
    #  Creates a random torch.long tensor of the given shape with values in [0, vocab_size)
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


def random_attention_mask(shape, rng=None, name=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()


@require_torch
class ModelUtilsTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = BertConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, PretrainedConfig)

            model = BertModel.from_pretrained(model_name)
            model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, PreTrainedModel)
            for value in loading_info.values():
                self.assertEqual(len(value), 0)

            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)

            # Not sure this is the intended behavior. TODO fix Lysandre & Thom
            config.name_or_path = model_name

            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(model.config, config)