# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect
import os.path
import random
import tempfile
import unittest
from typing import List, Tuple

from transformers import is_torch_available
from transformers.testing_utils import require_torch, require_torch_multigpu, slow, torch_device


if is_torch_available():
    import numpy as np
    import torch

    from transformers import (
        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        MODEL_FOR_CAUSAL_LM_MAPPING,
        MODEL_FOR_MASKED_LM_MAPPING,
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        AdaptiveEmbedding,
        BertConfig,
        BertModel,
        PretrainedConfig,
        PreTrainedModel,
        top_k_top_p_filtering,
    )
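

# `_config_zero_init` forces every initializer range/std/factor down to ~0 so that
# freshly initialized weights land on deterministic values; `test_initialization`
# below relies on this (parameter means must round to exactly 0.0 or 1.0).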
def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key or "initializer_factor" in key:
            setattr(configs_no_init, key, 1e-10)
    return configs_no_init


@require_torch
class ModelTesterMixin:

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_torchscript = True
    test_pruning = True
    test_resize_embeddings = True
    test_head_masking = True
    test_missing_keys = True
    is_encoder_decoder = False
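
    # Model-specific test classes subclass this mixin together with unittest.TestCase,
    # set `model_tester` in setUp() and override the attributes above as needed; a
    # commented sketch of the pattern sits at the end of the mixin below.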

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
                *MODEL_FOR_CAUSAL_LM_MAPPING.values(),
                *MODEL_FOR_MASKED_LM_MAPPING.values(),
                *MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg="Parameter {} of model {} seems not properly initialized".format(name, model_class),
                    )

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                    "encoder_outputs",
                ]
                self.assertListEqual(arg_names[:5], expected_arg_names)
            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class), return_dict=True)
            attentions = outputs["attentions"] if "attentions" in outputs.keys() else outputs[-1]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = (
                    self.model_tester.base_model_out_len if hasattr(self.model_tester, "base_model_out_len") else 4
                )
                decoder_attention_idx = (
                    self.model_tester.decoder_attention_idx
                    if hasattr(self.model_tester, "decoder_attention_idx")
                    else 1
                )

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                    decoder_attention_idx += 1
                # Question Answering model returns start_logits and end_logits
                if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                    correct_outlen += 1  # start_logits and end_logits instead of only 1 output
                    decoder_attention_idx += 1

                self.assertEqual(out_len, correct_outlen)

                decoder_attentions = outputs[decoder_attention_idx]
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs["attentions"] if "attentions" in outputs else outputs[-1]
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            if chunk_length is not None:
                self.assertListEqual(
                    list(self_attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
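
    # The three torchscript tests below all defer to `_create_and_check_torchscript`,
    # which traces the model with torch.jit.trace, saves and reloads the trace, and
    # compares the reloaded state dict parameter by parameter with the original.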

    def test_torchscript(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torchscript(config, inputs_dict)

    def test_torchscript_output_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_attentions = True
        self._create_and_check_torchscript(config, inputs_dict)

    def test_torchscript_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        self._create_and_check_torchscript(config, inputs_dict)

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)

            try:
                if model.config.is_encoder_decoder:
                    model.config.use_cache = False  # TODO: this should be deleted after bug #7474 is solved
                    input_ids = inputs["input_ids"]
                    attention_mask = inputs["attention_mask"]
                    decoder_input_ids = inputs["decoder_input_ids"]
                    decoder_attention_mask = inputs["decoder_attention_mask"]

                    traced_model = torch.jit.trace(
                        model, (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask)
                    )
                else:
                    input_ids = inputs["input_ids"]
                    traced_model = torch.jit.trace(model, input_ids)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)
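
    # test_headmasking masks head 0 of the first layer and all but the last head of the
    # last layer, then checks that the corresponding attention rows are (almost) zero and
    # that a gradient flows back to `head_mask`.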

    def test_headmasking(self):
        if not self.test_head_masking:
            return

        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask

            outputs = model(**inputs)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            attentions = outputs[-1]

            # Remove Nan
            for t in attentions:
                self.assertLess(
                    torch.sum(torch.isnan(t)), t.numel() / 4
                )  # Check we don't have more than 25% nans (arbitrary)
            attentions = [
                t.masked_fill(torch.isnan(t), 0.0) for t in attentions
            ]  # remove them (the test is less complete)

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
            self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
            self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
            self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)

    def test_head_pruning(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_pretrained(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_config_init(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_integration(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {0: [0], 1: [1, 2]}
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            heads_to_prune = {0: [0], 2: [1, 2]}
            model.prune_heads(heads_to_prune)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class), return_dict=True)
            hidden_states = outputs["hidden_states"] if "hidden_states" in outputs else outputs[-1]

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
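
    # test_feed_forward_chunking reruns the forward pass with config.chunk_size_feed_forward = 1,
    # i.e. the feed-forward layers applied chunk by chunk over the sequence, and checks that
    # the output matches the unchunked run.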

    def test_feed_forward_chunking(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            torch.manual_seed(0)
            config.chunk_size_feed_forward = 1
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

    def test_resize_tokens_embeddings(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))
            model.set_input_embeddings(torch.nn.Embedding(10, 10))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
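
    # test_correct_missing_keys saves only the base (headless) model and reloads it through
    # the full model class: the head weights must then show up in `missing_keys`.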

    def test_correct_missing_keys(self):
        if not self.test_missing_keys:
            return

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            base_model_prefix = model.base_model_prefix

            if hasattr(model, base_model_prefix):
                with tempfile.TemporaryDirectory() as temp_dir_name:
                    model.base_model.save_pretrained(temp_dir_name)
                    model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)

                    with self.subTest(msg="Missing keys for {}".format(model.__class__.__name__)):
                        self.assertGreater(len(loading_info["missing_keys"]), 0)

    def test_tie_model_weights(self):
        if not self.test_torchscript:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_same_values(layer_1, layer_2):
            equal = True
            for p1, p2 in zip(layer_1.weight, layer_2.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    equal = False
            return equal

        for model_class in self.all_model_classes:
            config.torchscript = True
            model_not_tied = model_class(config)
            if model_not_tied.get_output_embeddings() is None:
                continue

            config_tied = copy.deepcopy(config)
            config_tied.torchscript = False
            model_tied = model_class(config_tied)
            params_tied = list(model_tied.parameters())
            # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # embeddings.weight.data.div_(2)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # decoding.weight.data.div_(4)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after resize they remain tied.
            model_tied.resize_token_embeddings(config.vocab_size + 10)
            params_tied_2 = list(model_tied.parameters())
            self.assertEqual(len(params_tied_2), len(params_tied))

            # decoding.weight.data.mul_(20)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
            # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))
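
    # test_model_outputs_equivalence checks that `return_dict=False` (plain tuple) and
    # `return_dict=True` (a ModelOutput, converted back via .to_tuple()) produce the same
    # tensors under every combination of output_hidden_states/output_attentions/labels.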

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}.",
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )
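
    # test_inputs_embeds checks that models accept precomputed embeddings via `inputs_embeds`
    # (plus `decoder_inputs_embeds` for encoder-decoder models) in place of `input_ids`.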

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
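
    # The two generate tests below exercise sampling, greedy and beam search on every class
    # in `all_generative_model_classes`, including the `bad_words_ids` constraint.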

    def test_lm_head_model_random_no_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]

        # make sure that input_ids is at most of size 15
        input_ids = input_ids[..., :15]

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()

            if config.bos_token_id is None:
                # if bos token id is not defined, model needs input_ids
                with self.assertRaises(AssertionError):
                    model.generate(do_sample=True, max_length=5)
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5))

            with self.assertRaises(AssertionError):
                # generating multiple sequences without beam search is not allowed,
                # as it would always generate the same sequences
                model.generate(input_ids, do_sample=False, num_beams=1, num_return_sequences=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [
                self._generate_random_bad_tokens(1, model.config),
                self._generate_random_bad_tokens(2, model.config),
            ]
            output_tokens = model.generate(
                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.tolist(), bad_words_ids))

    def test_lm_head_model_random_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = (inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]).to(
            torch_device
        )

        # make sure that input_ids is at most of size 15
        input_ids = input_ids[..., :15]

        for model_class in self.all_generative_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()

            if config.bos_token_id is None:
                # if bos token id is not defined, model needs input_ids; num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))

            with self.assertRaises(AssertionError):
                # generating more sequences than we have beams is not possible
                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(
                model.generate(
                    input_ids,
                    do_sample=True,
                    num_beams=2,
                    num_return_sequences=2,
                )
            )
            # num_return_sequences > 1, greedy
            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [
                self._generate_random_bad_tokens(1, model.config),
                self._generate_random_bad_tokens(2, model.config),
            ]
            output_tokens = model.generate(
                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.tolist(), bad_words_ids))

    def _generate_random_bad_tokens(self, num_bad_tokens: int, config) -> List[int]:
        # special tokens cannot be bad tokens
        special_tokens = [x for x in [config.bos_token_id, config.eos_token_id, config.pad_token_id] if x is not None]
        # create random bad tokens that are not special tokens
        bad_tokens = []
        while len(bad_tokens) < num_bad_tokens:
            token = ids_tensor((1, 1), self.model_tester.vocab_size).squeeze(0).cpu().numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

    def _check_generated_ids(self, output_ids):
        for token_id in output_ids[0].tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

    def _check_match_tokens(self, generated_ids, bad_words_ids):
        # for all bad word tokens
        for bad_word_ids in bad_words_ids:
            # for all slices in batch
            for generated_ids_slice in generated_ids:
                # for all word idx
                for i in range(len(bad_word_ids), len(generated_ids_slice)):
                    # if tokens match
                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
                        return True
        return False

    @require_torch_multigpu
    def test_multigpu_data_parallel_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # some params shouldn't be scattered by nn.DataParallel
        # so just remove them if they are present.
        blacklist_non_batched_params = ["head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to cuda:0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = torch.nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))
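
    # A minimal sketch of how this mixin is typically consumed by a model-specific test
    # file. `MyConfig`, `MyModel` and `MyModelTester` are hypothetical placeholders, not
    # real transformers classes, so the sketch is left commented out:
    #
    #     class MyModelTester:
    #         def __init__(self, parent, batch_size=13, seq_length=7, vocab_size=99):
    #             self.parent = parent
    #             self.batch_size = batch_size
    #             self.seq_length = seq_length
    #             self.vocab_size = vocab_size
    #
    #         def prepare_config_and_inputs_for_common(self):
    #             config = MyConfig(vocab_size=self.vocab_size)
    #             inputs_dict = {
    #                 "input_ids": ids_tensor([self.batch_size, self.seq_length], self.vocab_size),
    #                 "attention_mask": random_attention_mask([self.batch_size, self.seq_length]),
    #             }
    #             return config, inputs_dict
    #
    #     @require_torch
    #     class MyModelTest(ModelTesterMixin, unittest.TestCase):
    #         all_model_classes = (MyModel,) if is_torch_available() else ()
    #
    #         def setUp(self):
    #             self.model_tester = MyModelTester(self)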


global_rng = random.Random()


def ids_tensor(shape, vocab_size, rng=None, name=None):
    # Creates a random long tensor of the given shape with values in [0, vocab_size)
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


def random_attention_mask(shape, rng=None, name=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
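
# Illustrative usage of the helpers above (shapes and values are arbitrary):
#   input_ids = ids_tensor([2, 7], vocab_size=99)       # (batch, seq_len) random token ids
#   attention_mask = random_attention_mask([2, 7])      # 0/1 mask; last column forced to 1
#   features = floats_tensor([2, 7, 32], scale=1.0)     # uniform floats in [0, scale)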


@require_torch
class ModelUtilsTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = BertConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, PretrainedConfig)

            model = BertModel.from_pretrained(model_name)
            model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, PreTrainedModel)
            for value in loading_info.values():
                self.assertEqual(len(value), 0)

            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(model.config, config)


@require_torch
class UtilsFunctionsTest(unittest.TestCase):

    # tests whether the top_k_top_p function behaves as expected
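    # top_k_top_p_filtering sets filtered logits to -inf: it keeps at most the `top_k`
    # highest logits, restricts them to the smallest set whose cumulative probability
    # exceeds `top_p`, and always keeps at least `min_tokens_to_keep` tokens per row.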
    def test_top_k_top_p_filtering(self):
        logits = torch.tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=torch.float,
            device=torch_device,
        )

        non_inf_expected_idx = torch.tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=torch.long,
            device=torch_device,
        )  # expected non-filtered idx as noted above

        non_inf_expected_output = torch.tensor(
            [
                8.2221,
                7.3534,
                8.4321,
                7.4402,
                9.3845,
                6.2712,
                8.8275,
                5.4403,
                7.3858,
                9.6770,
            ],  # expected non-filtered values as noted above
            dtype=torch.float,
            device=torch_device,
        )

        output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")].to(device=torch_device)
        non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)

        self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
        self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))