# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect
import os.path
import random
import tempfile
import unittest
from typing import List, Tuple

from transformers import is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import require_torch, require_torch_multigpu, slow, torch_device


if is_torch_available():
    import numpy as np
    import torch

    from transformers import (
        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        MODEL_FOR_CAUSAL_LM_MAPPING,
        MODEL_FOR_MASKED_LM_MAPPING,
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        AdaptiveEmbedding,
        BertConfig,
        BertModel,
        PretrainedConfig,
        PreTrainedModel,
    )


def _config_zero_init(config):
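    # Returns a copy of `config` with every *_range / *_std / initializer_factor
    # entry squashed to ~0 so that freshly initialized weights are (near) zero.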
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key or "initializer_factor" in key:
            setattr(configs_no_init, key, 1e-10)
    return configs_no_init


@require_torch
class ModelTesterMixin:
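    # Common tests shared by all PyTorch model classes. Model-specific test files
    # combine this mixin with unittest.TestCase and fill in the class attributes
    # below, roughly like this (a minimal sketch; `BertModelTester` stands in for
    # a model-specific helper that builds configs and dummy inputs):
    #
    #     @require_torch
    #     class BertModelTest(ModelTesterMixin, unittest.TestCase):
    #         all_model_classes = (BertModel,) if is_torch_available() else ()
    #
    #         def setUp(self):
    #             self.model_tester = BertModelTester(self)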

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_torchscript = True
    test_pruning = True
    test_resize_embeddings = True
    test_head_masking = True
    test_missing_keys = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
                *MODEL_FOR_CAUSAL_LM_MAPPING.values(),
                *MODEL_FOR_MASKED_LM_MAPPING.values(),
                *MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_save_load(self):
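        # Round-trip check: run a forward pass, save_pretrained() to a temp dir,
        # reload with from_pretrained(), and require the outputs to match to 1e-5.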
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_keys_to_never_save(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            keys_to_never_save = getattr(model, "keys_to_never_save", None)
            if keys_to_never_save is None:
                continue

            # check the keys are in the original state_dict
            for k in keys_to_never_save:
                self.assertIn(k, model.state_dict())

            # check that certain keys didn't get saved with the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME)
                state_dict_saved = torch.load(output_model_file)
                for k in keys_to_never_save:
                    self.assertNotIn(k, state_dict_saved)

    def test_initialization(self):
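        # With all init ranges zeroed out via _config_zero_init, every trainable
        # parameter should round to exactly 0.0 or 1.0 (e.g. LayerNorm weights).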
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg="Parameter {} of model {} seems not properly initialized".format(name, model_class),
                    )

    def test_determinism(self):
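        # Two forward passes over identical inputs in eval mode must agree to 1e-5.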
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_forward_signature(self):
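        # forward() must expose the standard argument names, in the standard
        # order, as its leading parameters.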
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                    "encoder_outputs",
                ]
                self.assertListEqual(arg_names[:5], expected_arg_names)
            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
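        # Attentions must contain one tensor per layer with the expected
        # (num_heads, query_length, key_length) shape, whether they are requested
        # per call or through the config.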
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class), return_dict=True)
            attentions = outputs["attentions"] if "attentions" in outputs.keys() else outputs[-1]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = (
                    self.model_tester.base_model_out_len if hasattr(self.model_tester, "base_model_out_len") else 4
                )
                decoder_attention_idx = (
                    self.model_tester.decoder_attention_idx
                    if hasattr(self.model_tester, "decoder_attention_idx")
                    else 1
                )

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                    decoder_attention_idx += 1
                # Question Answering model returns start_logits and end_logits
                if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                    correct_outlen += 1  # start_logits and end_logits instead of only 1 output
                    decoder_attention_idx += 1

                self.assertEqual(out_len, correct_outlen)

                decoder_attentions = outputs[decoder_attention_idx]
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs["attentions"] if "attentions" in outputs else outputs[-1]
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            if chunk_length is not None:
                self.assertListEqual(
                    list(self_attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_torchscript(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torchscript(config, inputs_dict)

    def test_torchscript_output_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_attentions = True
        self._create_and_check_torchscript(config, inputs_dict)

    def test_torchscript_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        self._create_and_check_torchscript(config, inputs_dict)

    def _create_and_check_torchscript(self, config, inputs_dict):
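        # Trace the model with torch.jit.trace, save and reload the trace, and
        # check the reloaded state_dict matches the original parameter for parameter.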
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no NaNs
        configs_no_init.torchscript = True
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)

            try:
                if model.config.is_encoder_decoder:
                    model.config.use_cache = False  # TODO: this should be deleted after bug #7474 is solved
                    input_ids = inputs["input_ids"]
                    attention_mask = inputs["attention_mask"]
                    decoder_input_ids = inputs["decoder_input_ids"]
                    decoder_attention_mask = inputs["decoder_attention_mask"]

                    traced_model = torch.jit.trace(
                        model, (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask)
                    )
                else:
                    input_ids = inputs["input_ids"]
                    traced_model = torch.jit.trace(model, input_ids)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_headmasking(self):
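        # Zero out selected attention heads via head_mask and check that masked
        # heads produce (near) zero attention, unmasked heads stay nonzero, and
        # gradients flow back to the mask.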
        if not self.test_head_masking:
            return

        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no NaNs
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask

            outputs = model(**inputs)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            attentions = outputs[-1]

            # Remove NaNs
            for t in attentions:
                self.assertLess(
                    torch.sum(torch.isnan(t)), t.numel() / 4
                )  # Check we don't have more than 25% nans (arbitrary)
            attentions = [
                t.masked_fill(torch.isnan(t), 0.0) for t in attentions
            ]  # remove them (the test is less complete)

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
            self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
            self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)

    def test_head_pruning(self):
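        # Prune all but one head in the first layer and head 0 in the last layer,
        # then check the attention tensors shrink along the head dimension.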
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_pretrained(self):
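        # Pruned heads must survive a save_pretrained / from_pretrained round trip.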
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_config_init(self):
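        # Heads listed in config.pruned_heads must be pruned at construction time.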
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_integration(self):
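        # Prune via the config, save/reload, then prune again at runtime;
        # config.pruned_heads must accumulate the union of both pruning maps.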
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {0: [0], 1: [1, 2]}
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            heads_to_prune = {0: [0], 2: [1, 2]}
            model.prune_heads(heads_to_prune)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})

    def test_hidden_states_output(self):
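        # hidden_states must have the expected number of entries (by default the
        # embedding output plus one per layer), each ending in
        # (seq_length, hidden_size), whether requested per call or via the config.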
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class), return_dict=True)
            hidden_states = outputs["hidden_states"] if "hidden_states" in outputs else outputs[-1]

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_feed_forward_chunking(self):
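        # With config.chunk_size_feed_forward = 1 the feed-forward block is applied
        # chunk by chunk over the sequence; outputs must match the unchunked run
        # to within 1e-3.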
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            torch.manual_seed(0)
            config.chunk_size_feed_forward = 1
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

    def test_resize_tokens_embeddings(self):
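        # Grow then shrink the token embedding matrix and check config.vocab_size,
        # the embedding shape, that forward passes still work, and that the
        # original embedding rows are preserved.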
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_model_common_attributes(self):
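        # Every model must expose input embeddings (an Embedding or
        # AdaptiveEmbedding) and output embeddings that are None or a Linear layer.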
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))
            model.set_input_embeddings(torch.nn.Embedding(10, 10))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, torch.nn.Linear))

    def test_correct_missing_keys(self):
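        # Loading a bare base model into a class with a task head must report the
        # head weights in loading_info["missing_keys"].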
        if not self.test_missing_keys:
            return
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            base_model_prefix = model.base_model_prefix

            if hasattr(model, base_model_prefix):
                with tempfile.TemporaryDirectory() as temp_dir_name:
                    model.base_model.save_pretrained(temp_dir_name)
                    model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)

                    with self.subTest(msg="Missing keys for {}".format(model.__class__.__name__)):
                        self.assertGreater(len(loading_info["missing_keys"]), 0)

    def test_tie_model_weights(self):
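        # Weight tying: resizing the token embeddings of a model with tied
        # input/output embeddings must not change its number of parameters.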
        if not self.test_torchscript:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_same_values(layer_1, layer_2):
            equal = True
            for p1, p2 in zip(layer_1.weight, layer_2.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    equal = False
            return equal

        for model_class in self.all_model_classes:
            config.torchscript = True
            model_not_tied = model_class(config)
            if model_not_tied.get_output_embeddings() is None:
                continue

            config_tied = copy.deepcopy(config)
            config_tied.torchscript = False
            model_tied = model_class(config_tied)
            params_tied = list(model_tied.parameters())
            # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # embeddings.weight.data.div_(2)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # decoding.weight.data.div_(4)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after resize they remain tied.
            model_tied.resize_token_embeddings(config.vocab_size + 10)
            params_tied_2 = list(model_tied.parameters())
            self.assertEqual(len(params_tied_2), len(params_tied))

            # decoding.weight.data.mul_(20)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
            # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))

    def test_model_outputs_equivalence(self):
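        # Outputs returned as a plain tuple (return_dict=False) and as a
        # ModelOutput (return_dict=True) must be element-wise equal under every
        # combination of output flags.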

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}.",
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_inputs_embeds(self):
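        # Models must accept precomputed embeddings via inputs_embeds (and
        # decoder_inputs_embeds for encoder-decoder models) in place of input_ids.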

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_multigpu
    def test_multigpu_data_parallel_forward(self):
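        # Smoke test: the forward pass must also work when the model is wrapped in
        # torch.nn.DataParallel and inputs are scattered across GPUs.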
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # some params shouldn't be scattered by nn.DataParallel
        # so just remove them if they are present.
        blacklist_non_batched_params = ["head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to cuda:0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = torch.nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))


global_rng = random.Random()


def ids_tensor(shape, vocab_size, rng=None, name=None):
    # Creates a random torch.long tensor of the given shape with values in [0, vocab_size)
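    # e.g. ids_tensor([2, 7], vocab_size=99) -> LongTensor of shape (2, 7) with values in [0, 98]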
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


def random_attention_mask(shape, rng=None, name=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
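    # e.g. floats_tensor([3, 4], scale=0.5) -> FloatTensor of shape (3, 4) with values in [0.0, 0.5)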
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()


@require_torch
class ModelUtilsTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = BertConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, PretrainedConfig)

            model = BertModel.from_pretrained(model_name)
            model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, PreTrainedModel)
            for value in loading_info.values():
                self.assertEqual(len(value), 0)

            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)

            # Not sure this is the intended behavior. TODO fix Lysandre & Thom
            config.name_or_path = model_name

            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(model.config, config)