# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import copy
import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple

from transformers import is_tf_available
from transformers.testing_utils import _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TFSharedEmbeddings,
        tf_top_k_top_p_filtering,
    )

    if _tf_gpu_memory_limit is not None:
        gpus = tf.config.list_physical_devices("GPU")
        for gpu in gpus:
            # Restrict TensorFlow to only allocate x GB of memory on the GPUs
            try:
                tf.config.set_logical_device_configuration(
                    gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
                )
                logical_gpus = tf.config.list_logical_devices("GPU")
                print("Logical GPUs", logical_gpus)
            except RuntimeError as e:
                # Virtual devices must be set before GPUs have been initialized
                print(e)


def _config_zero_init(config):
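    # Return a copy of the config with every `*_range` / `*_std` attribute set to 0.0,
    # so weights drawn from those initializers start out as zeros.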
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key:
            setattr(configs_no_init, key, 0.0)
    return configs_no_init


@require_tf
class TFModelTesterMixin:

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_resize_embeddings = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values():
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
                *TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),
                *TF_MODEL_FOR_MASKED_LM_MAPPING.values(),
                *TF_MODEL_FOR_PRETRAINING_MAPPING.values(),
                *TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
            ]:
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
                )
        return inputs_dict

    def test_initialization(self):
        pass

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))

                self.assert_outputs_same(after_outputs, outputs)

    def test_graph_mode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                    "encoder_outputs",
                ]
                self.assertListEqual(arg_names[:5], expected_arg_names)

            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_saved_model_creation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = False
        config.output_attentions = False

        if hasattr(config, "use_cache"):
            config.use_cache = False

        model_class = self.all_model_classes[0]

        class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
        model = model_class(config)

        model(class_inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=True)
            saved_model_dir = os.path.join(tmpdirname, "saved_model")
            self.assertTrue(os.path.exists(saved_model_dir))

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            model(class_inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model")
                self.assertTrue(os.path.exists(saved_model_dir))

    @slow
    def test_saved_model_with_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # A saved model is always executed in graph mode, and since we merged PR #8777,
            # the booleans used in graph mode are always the ones from the config. We therefore
            # update the config's use_cache property, if it exists, so that it matches the inputs.
            if "use_cache" in class_inputs_dict:
                config.use_cache = class_inputs_dict.pop("use_cache")
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                saved_model_dir = os.path.join(tmpdirname, "saved_model")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output = outputs["encoder_hidden_states"] if isinstance(outputs, dict) else outputs[-1]
                else:
                    output = outputs["hidden_states"] if isinstance(outputs, dict) else outputs[-1]

                hidden_states = [t.numpy() for t in output]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

    @slow
    def test_saved_model_with_attentions_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_attentions = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # A saved model is always executed in graph mode, and since we merged PR #8777,
            # the booleans used in graph mode are always the ones from the config. We therefore
            # update the config's use_cache property, if it exists, so that it matches the inputs.
            if "use_cache" in class_inputs_dict:
                config.use_cache = class_inputs_dict.pop("use_cache")
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                saved_model_dir = os.path.join(tmpdirname, "saved_model")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output = outputs["encoder_attentions"] if isinstance(outputs, dict) else outputs[-1]
                else:
                    output = outputs["attentions"] if isinstance(outputs, dict) else outputs[-1]

                attentions = [t.numpy() for t in output]
                self.assertEqual(len(outputs), num_out)
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
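        # Collect every keras-serializable "...MainLayer" class defined in the modules of the tested model classes.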

        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )
        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    def assert_outputs_same(self, after_outputs, outputs):
        # Make sure we don't have nans
        if isinstance(after_outputs, tf.Tensor):
            out_1 = after_outputs.numpy()
        elif isinstance(after_outputs, dict):
            out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
        else:
            out_1 = after_outputs[0].numpy()
        out_2 = outputs[0].numpy()
        self.assertEqual(out_1.shape, out_2.shape)
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):

        import torch

        import transformers

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            config.output_hidden_states = True

            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            # Check we can load pt model in tf and vice-versa with model => model functions

            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
            )
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    pt_inputs_dict[name] = key
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)

            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()

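            # Zero out NaN positions (from either framework) in both outputs so they do not skew the max-diff comparison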
            tf_nans = np.copy(np.isnan(tf_hidden_states))
            pt_nans = np.copy(np.isnan(pt_hidden_states))

            pt_hidden_states[tf_nans] = 0
            tf_hidden_states[tf_nans] = 0
            pt_hidden_states[pt_nans] = 0
            tf_hidden_states[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
            self.assertLessEqual(max_diff, 4e-2)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    key = np.array(key, dtype=bool)
                    pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
            tf_nans = np.copy(np.isnan(tfo))
            pt_nans = np.copy(np.isnan(pto))

            pto[tf_nans] = 0
            tfo[tf_nans] = 0
            pto[pt_nans] = 0
            tfo[pt_nans] = 0

            max_diff = np.amax(np.abs(tfo - pto))
            self.assertLessEqual(max_diff, 4e-2)

    def test_train_pipeline_custom_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )

        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
                config.use_cache = False
                main_layer = main_layer_class(config, embed_tokens=shared)
                del inputs_dict["use_cache"]
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            if hasattr(self.model_tester, "num_labels"):
                num_labels = self.model_tester.num_labels
            else:
                num_labels = 2

            X = tf.data.Dataset.from_tensor_slices(
                (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
            ).batch(1)

            hidden_states = main_layer(symbolic_inputs)[0]
            outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
            model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])

            model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
            model.fit(X, epochs=1)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                model(inputs_dict)

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        max_input = getattr(self.model_tester, "max_position_embeddings", 512)

        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")

        for model_class in self.all_model_classes:
            if self.is_encoder_decoder:
                input_ids = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, max_input),
                        name="decoder_input_ids",
                        dtype="int32",
                    ),
                    "input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
                }
            elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
                input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
            else:
                input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")

            # Prepare our model
            model = model_class(config)
            model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
            # Let's load it from the disk to be sure we can use pretrained weights
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)

            outputs_dict = model(input_ids)
            hidden_states = outputs_dict[0]

            # Add a dense layer on top to test integration with other keras modules
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)

            # Compile extended model
            extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            input_ids = inputs_keywords.pop("input_ids", None)
            outputs_keywords = model(input_ids, **inputs_keywords)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_hidden_states_output(config, inputs_dict, model_class):
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )

            if model.config.is_encoder_decoder:
                encoder_hidden_states = outputs.encoder_hidden_states
                decoder_hidden_states = outputs.decoder_hidden_states

                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(encoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(encoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(decoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(decoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
            else:
                hidden_states = outputs.hidden_states
                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_lm_models = (
            list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.values())
            + list(TF_MODEL_FOR_MASKED_LM_MAPPING.values())
            + list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values())
        )

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)

            if model_class in list_lm_models:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            first, second = (
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
            )
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_model_outputs_equivalence(self):

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def _get_embeds(self, wte, input_ids):
        # In our TF models, the input embeddings can take slightly different forms,
        # so we try a few of them here.
        # We used to fall back to just synthetically creating a dummy tensor of ones:
        try:
            x = wte(input_ids, mode="embedding")
        except Exception:
            try:
                x = wte([input_ids], mode="embedding")
            except Exception:
                try:
                    x = wte([input_ids, None, None, None], mode="embedding")
                except Exception:
                    if hasattr(self.model_tester, "embedding_size"):
                        x = tf.ones(
                            input_ids.shape + [self.model_tester.embedding_size],
                            dtype=tf.dtypes.float32,
                        )
                    else:
                        x = tf.ones(
                            input_ids.shape + [self.model_tester.hidden_size],
                            dtype=tf.dtypes.float32,
                        )
        return x

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = self._get_embeds(wte, input_ids)
            else:
                inputs["inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
                inputs["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)

            model(inputs)

    def test_numpy_arrays_inputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            model(inputs_np)

    def test_resize_token_embeddings(self):
        if not self.test_resize_embeddings:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
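            # Different TF models expose their word embedding weights under different attribute names, so try the known ones.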
            if hasattr(embedding_layer, "word_embeddings"):
                return embedding_layer.word_embeddings
            elif hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            elif hasattr(embedding_layer, "decoder"):
                return embedding_layer.decoder
            else:
                # Here we build the word embeddings weights if they don't exist yet,
                # then retry to get the attribute once built.
                model(model.dummy_inputs)
                if hasattr(embedding_layer, "word_embeddings"):
                    return embedding_layer.word_embeddings
                elif hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                elif hasattr(embedding_layer, "decoder"):
                    return embedding_layer.decoder
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_bias = model.get_bias()
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_bias = model.get_bias()
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_bias is not None and new_bias is not None:
                    for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
                        self.assertEqual(new_weight.shape[0], assert_size)

                        models_equal = True
                        for p1, p2 in zip(old_weight.value(), new_weight.value()):
                            if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                                models_equal = False
                        self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)

    def test_lm_head_model_random_no_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict["input_ids"]

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids
                with self.assertRaises(AssertionError):
                    model.generate(do_sample=True, max_length=5)
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5))

            with self.assertRaises(AssertionError):
                # generating multiple sequences when no beam search generation
                # is not allowed as it would always generate the same sequences
                model.generate(input_ids, do_sample=False, num_return_sequences=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

    def test_lm_head_model_random_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict["input_ids"]

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids, num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))

            with self.assertRaises(AssertionError):
                # generating more sequences than we have beams is not possible
                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(
                model.generate(
                    input_ids,
                    do_sample=True,
                    num_beams=2,
                    num_return_sequences=2,
                )
            )
            # num_return_sequences > 1, greedy
            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
                ]
                loss_size = tf.size(added_label)

                if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():
                    # if loss is causal lm loss, labels are shifted, so that one label per batch
                    # is cut
                    loss_size = loss_size - self.model_tester.batch_size

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
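                # the last positional argument is dropped here and falls back to its default
                # value (typically the `training` flag in current TF model call signatures)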

                self.assertEqual(loss.shape, [loss_size])

    def _generate_random_bad_tokens(self, num_bad_tokens, model):
        # special tokens cannot be bad tokens
        special_tokens = []
        if model.config.bos_token_id is not None:
            special_tokens.append(model.config.bos_token_id)
        if model.config.pad_token_id is not None:
            special_tokens.append(model.config.pad_token_id)
        if model.config.eos_token_id is not None:
            special_tokens.append(model.config.eos_token_id)

        # create random bad tokens that are not special tokens
        bad_tokens = []
        while len(bad_tokens) < num_bad_tokens:
            token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

    def _check_generated_ids(self, output_ids):
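        # every generated token id must be a valid index into the model's vocabulary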
        for token_id in output_ids[0].numpy().tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

    def _check_match_tokens(self, generated_ids, bad_words_ids):
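        # Returns True if any bad-word sequence occurs as a contiguous window in any generated
        # sequence. Illustrative (hypothetical) example: bad_words_ids = [[2, 3]] and
        # generated_ids = [[5, 2, 3, 7]] match on the slice [2, 3], so this would return True.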
        # for all bad word tokens
        for bad_word_ids in bad_words_ids:
            # for all slices in batch
            for generated_ids_slice in generated_ids:
                # for all word idx
                for i in range(len(bad_word_ids), len(generated_ids_slice)):
                    # if tokens match
                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
                        return True
        return False


def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)

    return output


@require_tf
class UtilsFunctionsTest(unittest.TestCase):

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non-filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non-filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
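        # As annotated in the logits above, the five highest values of each row have a cumulative
        # probability <= 0.6, so top_p keeps exactly those (top_k=10 and min_tokens_to_keep=4 are
        # not the binding constraints here); every other position is filtered to -inf.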

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)