# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect
import json
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple

from transformers import is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
    _tf_gpu_memory_limit,
    is_pt_tf_cross_test,
    require_onnx,
    require_tf,
    slow,
    tooslow,
)


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TFSharedEmbeddings,
        tf_top_k_top_p_filtering,
    )

    if _tf_gpu_memory_limit is not None:
        gpus = tf.config.list_physical_devices("GPU")
        for gpu in gpus:
            # Restrict TensorFlow to only allocate x GB of memory on the GPUs
            try:
                tf.config.set_logical_device_configuration(
                    gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
                )
                logical_gpus = tf.config.list_logical_devices("GPU")
                print("Logical GPUs", logical_gpus)
            except RuntimeError as e:
                # Virtual devices must be set before GPUs have been initialized
                print(e)


def _config_zero_init(config):
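    # Return a copy of `config` with every *_range / *_std attribute zeroed so that
    # weight initialization is deterministic.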
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key:
            setattr(configs_no_init, key, 0.0)
    return configs_no_init


@require_tf
class TFModelTesterMixin:

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_resize_embeddings = True
    test_head_masking = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

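        # For multiple-choice models, tile every tensor input so it gains a num_choices
        # axis: (batch_size, seq_length) -> (batch_size, num_choices, seq_length).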
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
                *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
                *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
                *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
                *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
            ]:
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
                )
        return inputs_dict

    def test_initialization(self):
        pass

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))

                self.assert_outputs_same(after_outputs, outputs)

    @tooslow
    def test_graph_mode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    @tooslow
    def test_xla_mode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

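            # experimental_compile=True requests XLA compilation of the traced function.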
            @tf.function(experimental_compile=True)
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                ]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask", "encoder_outputs"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else ["encoder_outputs"]
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    @tooslow
    def test_saved_model_creation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = False
        config.output_attentions = False

        if hasattr(config, "use_cache"):
            config.use_cache = False

        model_class = self.all_model_classes[0]

        class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
        model = model_class(config)

        model(class_inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=True)
            saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
            self.assertTrue(os.path.exists(saved_model_dir))

    @tooslow
    def test_saved_model_creation_extended(self):
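        # Save with saved_model=True, reload the resulting SavedModel through Keras, and
        # check that output count, hidden-state shapes and attention shapes survive the round trip.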
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_onnx_compliancy(self):
        if not self.test_onnx:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
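        # TF-internal bookkeeping ops; they are exempt from the ONNX opset check below.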
        INTERNAL_OPS = [
            "Assert",
            "AssignVariableOp",
            "EmptyTensorList",
            "ReadVariableOp",
            "ResourceGather",
            "TruncatedNormal",
            "VarHandleOp",
            "VarIsInitializedOp",
        ]
        onnx_ops = []

        with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
            onnx_opsets = json.load(f)["opsets"]

        for i in range(1, self.onnx_min_opset + 1):
            onnx_ops.extend(onnx_opsets[str(i)])

        for model_class in self.all_model_classes:
            model_op_names = set()

            with tf.Graph().as_default() as g:
                model = model_class(config)
                model(model.dummy_inputs)

                for op in g.get_operations():
                    model_op_names.add(op.node_def.op)

            model_op_names = sorted(model_op_names)
            incompatible_ops = []

            for op in model_op_names:
                if op not in onnx_ops and op not in INTERNAL_OPS:
                    incompatible_ops.append(op)

            self.assertEqual(len(incompatible_ops), 0, incompatible_ops)

    @require_onnx
    @slow
    def test_onnx_runtime_optimize(self):
        if not self.test_onnx:
            return

        import keras2onnx
        import onnxruntime

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model(model.dummy_inputs)

            onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=self.onnx_min_opset)

            onnxruntime.InferenceSession(onnx_model.SerializeToString())

    @tooslow
    def test_mixed_precision(self):
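        # Run every model once under the mixed_float16 policy, then restore float32 so
        # subsequent tests are unaffected.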
        tf.keras.mixed_precision.experimental.set_policy("mixed_float16")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            outputs = model(class_inputs_dict)

            self.assertIsNotNone(outputs)

        tf.keras.mixed_precision.experimental.set_policy("float32")

    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

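        # Collect every Keras-serializable "...MainLayer" class defined in the modules
        # the tested model classes live in.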
        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )
        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    def assert_outputs_same(self, after_outputs, outputs):
        # Make sure we don't have nans
        if isinstance(after_outputs, tf.Tensor):
            out_1 = after_outputs.numpy()
        elif isinstance(after_outputs, dict):
            out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
        else:
            out_1 = after_outputs[0].numpy()
        out_2 = outputs[0].numpy()
        self.assertEqual(out_1.shape, out_2.shape)
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):

        import torch

        import transformers

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            config.output_hidden_states = True

            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
            )
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    pt_inputs_dict[name] = key
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)

            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()

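            # NaNs may appear in either framework's output; zero the same positions on
            # both sides so only finite values are compared.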
            tf_nans = np.copy(np.isnan(tf_hidden_states))
            pt_nans = np.copy(np.isnan(pt_hidden_states))

            pt_hidden_states[tf_nans] = 0
            tf_hidden_states[tf_nans] = 0
            pt_hidden_states[pt_nans] = 0
            tf_hidden_states[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
            self.assertLessEqual(max_diff, 4e-2)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    key = np.array(key, dtype=bool)
                    pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
            tf_nans = np.copy(np.isnan(tfo))
            pt_nans = np.copy(np.isnan(pto))

            pto[tf_nans] = 0
            tfo[tf_nans] = 0
            pto[pt_nans] = 0
            tfo[pt_nans] = 0

            max_diff = np.amax(np.abs(tfo - pto))
            self.assertLessEqual(max_diff, 4e-2)

    @tooslow
    def test_train_pipeline_custom_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # head_mask and decoder_head_mask have different shapes than other input args
        if "head_mask" in inputs_dict:
            del inputs_dict["head_mask"]
        if "decoder_head_mask" in inputs_dict:
            del inputs_dict["decoder_head_mask"]
        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )

        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
                config.use_cache = False
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            if hasattr(self.model_tester, "num_labels"):
                num_labels = self.model_tester.num_labels
            else:
                num_labels = 2

            X = tf.data.Dataset.from_tensor_slices(
                (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
            ).batch(1)

            hidden_states = main_layer(symbolic_inputs)[0]
            outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
            model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])

            model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
            model.fit(X, epochs=1)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                model(inputs_dict)

567
568
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        max_input = getattr(self.model_tester, "max_position_embeddings", 512)
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")

        for model_class in self.all_model_classes:
            if self.is_encoder_decoder:
                input_ids = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, max_input),
                        name="decoder_input_ids",
                        dtype="int32",
                    ),
                    "input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
                }
            elif model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
            else:
                input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")

            # Prepare our model
            model = model_class(config)
            model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
            # Let's load it from the disk to be sure we can use pretrained weights
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)

            outputs_dict = model(input_ids)
            hidden_states = outputs_dict[0]

            # Add a dense layer on top to test integration with other keras modules
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)

            # Compile extended model
            extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
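        # Calling a model with a single input dict must match calling it with the same
        # values unpacked as keyword arguments.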
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            input_ids = inputs_keywords.pop("input_ids", None)
            outputs_keywords = model(input_ids, **inputs_keywords)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    def test_headmasking(self):
        if not self.test_head_masking:
            return

        random.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        random.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no NaN
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Prepare head_mask
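            # Mask head 0 in the first layer and every head except the last in the final
            # layer; intermediate layers stay fully unmasked.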
            def prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
                if i == 0:
                    return tf.concat(
                        (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0
                    )
                elif i == num_hidden_layers - 1:
                    return tf.concat(
                        (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0
                    )
                else:
                    return tf.ones(attention_heads, dtype=tf.float32)

            head_mask = tf.stack(
                [
                    prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
                    for i in range(config.num_hidden_layers)
                ],
                0,
            )

            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.call)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary differentiation because of T5 model
                    inputs["decoder_head_mask"] = head_mask

            outputs = model(**inputs, return_dict=True)

            def check_attentions_validity(attentions):
                # Remove NaN
                for t in attentions:
                    self.assertLess(
                        (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy()
                    )  # Check we don't have more than 25% nans (arbitrary)

                attentions = [
                    tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions
                ]  # remove them (the test is less complete)

                self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0)
                self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0)
                self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0)
                self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0)

            if model.config.is_encoder_decoder:
                check_attentions_validity(outputs.encoder_attentions)
                check_attentions_validity(outputs.decoder_attentions)
            else:
                check_attentions_validity(outputs.attentions)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_hidden_states_output(config, inputs_dict, model_class):
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )

            if model.config.is_encoder_decoder:
                encoder_hidden_states = outputs.encoder_hidden_states
                decoder_hidden_states = outputs.decoder_hidden_states

                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(encoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(encoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(decoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(decoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
            else:
                hidden_states = outputs.hidden_states
                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_lm_models = (
            get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
            + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING)
            + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
        )

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)

            if model_class in list_lm_models:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_determinism(self):
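        # Two forward passes in inference mode (training=False) must produce identical outputs.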
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            first, second = (
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
            )
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_model_outputs_equivalence(self):
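        # A model must return the same values whether it is called with return_dict=False
        # (tuple output) or return_dict=True (ModelOutput), for every input combination below.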

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_inputs_embeds(self):
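        # Models must accept `inputs_embeds` (and `decoder_inputs_embeds` for
        # encoder-decoder models) in place of token ids.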
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(inputs_dict)

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
            else:
                inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)

            inputs = self._prepare_for_class(inputs, model_class)

            model(inputs)

    @tooslow
    def test_graph_mode_with_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(inputs_dict)

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
            else:
                inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)

            inputs = self._prepare_for_class(inputs, model_class)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_numpy_arrays_inputs(self):
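        # Models must accept plain numpy arrays wherever they accept tf.Tensors.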
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            model(inputs_np)

    def test_resize_token_embeddings(self):
        if not self.test_resize_embeddings:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
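            # The embedding weight only exists once the layer is built: look for
            # `weight`/`decoder` first, build the model with dummy inputs, then retry.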
            embeds = getattr(embedding_layer, "weight", None)
            if embeds is not None:
                return embeds

            embeds = getattr(embedding_layer, "decoder", None)
            if embeds is not None:
                return embeds

            model(model.dummy_inputs)

            embeds = getattr(embedding_layer, "weight", None)
            if embeds is not None:
                return embeds

            embeds = getattr(embedding_layer, "decoder", None)
            if embeds is not None:
                return embeds

            return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_bias = model.get_bias()
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_bias = model.get_bias()
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_bias is not None and new_bias is not None:
                    for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
                        self.assertEqual(new_weight.shape[0], assert_size)

                        models_equal = True
                        for p1, p2 in zip(old_weight.value(), new_weight.value()):
                            if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                                models_equal = False
                        self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)

    def test_lm_head_model_random_no_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict["input_ids"]

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids
                with self.assertRaises(AssertionError):
                    model.generate(do_sample=True, max_length=5)
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5))

            with self.assertRaises(AssertionError):
                # generating multiple sequences when no beam search generation
                # is not allowed as it would always generate the same sequences
                model.generate(input_ids, do_sample=False, num_return_sequences=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

    def test_lm_head_model_random_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict["input_ids"]

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids, num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))

            with self.assertRaises(AssertionError):
                # generating more sequences than the number of beams is not possible
                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(
                model.generate(
                    input_ids,
                    do_sample=True,
                    num_beams=2,
                    num_return_sequences=2,
                )
            )
            # num_return_sequences > 1, greedy
            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))

            # check that generation does not produce the configured bad words tokens
            # create one single-token bad word and one two-token bad word
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

    def test_loss_computation(self):
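        # models exposing compute_loss should return a loss of the expected size for
        # kwargs, dict, and tuple input formats alike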
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
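                # the label tensor is whichever key _prepare_for_class added on top of the
                # common inputs (sorted in reverse so the same key is picked on every run,
                # e.g. "start_positions" rather than "end_positions" for QA models)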
                added_label = prepared_for_class[
                    sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
                ]
                loss_size = tf.size(added_label)

                if model.__class__ in get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING):
                    # if loss is causal lm loss, labels are shifted, so one label per batch is cut
                    loss_size = loss_size - self.model_tester.batch_size

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
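                # inspect model.call to recover the positional order of its arguments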
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with the signature's default values, overwrite the label
                # positions and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
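                # the last element of the tuple is the final default of model.call (typically
                # the `training` flag), so it is dropped before calling the model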
                loss = model(tuple_input[:-1])[0]

                self.assertEqual(loss.shape, [loss_size])

    def _generate_random_bad_tokens(self, num_bad_tokens, model):
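        # helper for the generate tests above: draws `num_bad_tokens` random token ids
        # (never special tokens) to use as a bad_words_ids entry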
        # special tokens cannot be bad tokens
        special_tokens = []
        if model.config.bos_token_id is not None:
            special_tokens.append(model.config.bos_token_id)
        if model.config.pad_token_id is not None:
            special_tokens.append(model.config.pad_token_id)
        if model.config.eos_token_id is not None:
            special_tokens.append(model.config.eos_token_id)

        # create random bad tokens that are not special tokens
        bad_tokens = []
        while len(bad_tokens) < num_bad_tokens:
            token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

    def _check_generated_ids(self, output_ids):
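        # every generated token id must be a valid index into the vocabulary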
        for token_id in output_ids[0].numpy().tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

    def _check_match_tokens(self, generated_ids, bad_words_ids):
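        # returns True if any bad word sequence appears as a contiguous slice of the generated ids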
        # for all bad word tokens
        for bad_word_ids in bad_words_ids:
            # for all slices in batch
            for generated_ids_slice in generated_ids:
                # for all sliding windows over the slice (the +1 lets the window ending at
                # the final token be checked as well)
                for i in range(len(bad_word_ids), len(generated_ids_slice) + 1):
                    # if tokens match
                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
                        return True
        return False


def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)

    return output
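
# A minimal usage sketch for ids_tensor (the shape and vocab size below are illustrative):
#
#     random_ids = ids_tensor((2, 3), vocab_size=10)  # int32 tensor of shape (2, 3), values in [0, 10)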


@require_tf
class UtilsFunctionsTest(unittest.TestCase):

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
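        # tf_top_k_top_p_filtering keeps only logits that survive both top-k filtering (the
        # top_k highest values) and nucleus filtering (the smallest set whose cumulative
        # probability exceeds top_p); every other position is set to -inf, and
        # min_tokens_to_keep guarantees at least that many logits survive per row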
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non-filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non-filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)