# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import copy
import inspect
import json
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple

from huggingface_hub import delete_repo, login
from requests.exceptions import HTTPError
from transformers import is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import tooslow  # noqa: F401
from transformers.testing_utils import (
    PASS,
    USER,
    CaptureLogger,
    _tf_gpu_memory_limit,
    is_pt_tf_cross_test,
    is_staging_test,
    require_tf,
    require_tf2onnx,
    slow,
)
from transformers.utils import logging


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        BertConfig,
        TFAutoModel,
        TFAutoModelForSequenceClassification,
        TFBertModel,
        TFSharedEmbeddings,
        tf_top_k_top_p_filtering,
    )
    from transformers.generation_tf_utils import (
        TFBeamSampleDecoderOnlyOutput,
        TFBeamSampleEncoderDecoderOutput,
        TFBeamSearchDecoderOnlyOutput,
        TFBeamSearchEncoderDecoderOutput,
        TFGreedySearchDecoderOnlyOutput,
        TFGreedySearchEncoderDecoderOutput,
        TFSampleDecoderOnlyOutput,
        TFSampleEncoderDecoderOutput,
    )

    if _tf_gpu_memory_limit is not None:
        gpus = tf.config.list_physical_devices("GPU")
        for gpu in gpus:
            # Restrict TensorFlow to only allocate x GB of memory on the GPUs
            try:
                tf.config.set_logical_device_configuration(
                    gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
                )
                logical_gpus = tf.config.list_logical_devices("GPU")
                print("Logical GPUs", logical_gpus)
            except RuntimeError as e:
                # Virtual devices must be set before GPUs have been initialized
                print(e)
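
    # (TensorFlow only accepts logical device configuration before a GPU has been
    # initialized, which is why a late RuntimeError is reported above rather than raised.)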


def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key:
            setattr(configs_no_init, key, 0.0)
    return configs_no_init
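

def _example_zero_init_usage():
    # Illustrative sketch only, not called by the test suite: _config_zero_init pins every
    # "*_range"/"*_std" attribute to 0.0 so weight initialization cannot inject NaNs
    # (test_headmasking below relies on this). The config value here is arbitrary.
    config = BertConfig(initializer_range=0.02)
    assert _config_zero_init(config).initializer_range == 0.0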


@require_tf
class TFModelTesterMixin:

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_mismatched_shapes = True
    test_resize_embeddings = True
    test_head_masking = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
                *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
                *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
                *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
                *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
                *get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING),
            ]:
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
                )
        return inputs_dict
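
    # Label shapes produced above (for reference): per-example heads (multiple choice,
    # sequence/image classification, next-sentence prediction) get labels of shape
    # (batch_size,); question answering gets start/end positions of shape (batch_size,);
    # token-level heads (token classification and the LM variants) get labels of shape
    # (batch_size, seq_length).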

    def test_initialization(self):
        pass

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))

                self.assert_outputs_same(after_outputs, outputs)

    def test_save_load_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            model_config = model.get_config()
            # make sure that the returned config is JSON-serializable, which Keras requires
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(self._prepare_for_class(inputs_dict, model_class))  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class))

            self.assert_outputs_same(after_outputs, outputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict, so the arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                ]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else []
                )
                # Necessary to handle BART with newly added cross_attn_head_mask
                expected_arg_names.extend(
                    ["cross_attn_head_mask", "encoder_outputs"]
                    if "cross_attn_head_mask" in arg_names
                    else ["encoder_outputs"]
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_onnx_compliancy(self):
        if not self.test_onnx:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        INTERNAL_OPS = [
            "Assert",
            "AssignVariableOp",
            "EmptyTensorList",
            "ReadVariableOp",
            "ResourceGather",
            "TruncatedNormal",
            "VarHandleOp",
            "VarIsInitializedOp",
        ]
        onnx_ops = []

        with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
            onnx_opsets = json.load(f)["opsets"]

        for i in range(1, self.onnx_min_opset + 1):
            onnx_ops.extend(onnx_opsets[str(i)])

        for model_class in self.all_model_classes:
            model_op_names = set()

            with tf.Graph().as_default() as g:
                model = model_class(config)
                model(model.dummy_inputs)

                for op in g.get_operations():
                    model_op_names.add(op.node_def.op)

            model_op_names = sorted(model_op_names)
            incompatible_ops = []

            for op in model_op_names:
                if op not in onnx_ops and op not in INTERNAL_OPS:
                    incompatible_ops.append(op)

            self.assertEqual(len(incompatible_ops), 0, incompatible_ops)
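
    # A stripped-down sketch of the op harvesting used above (hypothetical snippet, not
    # part of the test): ops built inside a fresh graph are exactly what get_operations()
    # returns, e.g.
    #
    #     with tf.Graph().as_default() as g:
    #         tf.constant([1.0]) + tf.constant([2.0])
    #     sorted({op.node_def.op for op in g.get_operations()})  # -> ["AddV2", "Const"]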

    @require_tf2onnx
    @slow
    def test_onnx_runtime_optimize(self):
        if not self.test_onnx:
            return

        import onnxruntime
        import tf2onnx

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model(model.dummy_inputs)

            onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset)

            onnxruntime.InferenceSession(onnx_model_proto.SerializeToString())

    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )
        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
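
    # (Keras can only deserialize custom layers that are passed via custom_objects at load
    # time; the T5 branch above additionally registers TFSharedEmbeddings because its main
    # layer is built around a shared embedding layer.)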

    def assert_outputs_same(self, after_outputs, outputs):
        # Make sure we don't have nans
        if isinstance(after_outputs, tf.Tensor):
            out_1 = after_outputs.numpy()
        elif isinstance(after_outputs, dict):
            out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
        else:
            out_1 = after_outputs[0].numpy()
        out_2 = outputs[0].numpy()
        self.assertEqual(out_1.shape, out_2.shape)
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        import torch

        import transformers

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            config.output_hidden_states = True

            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
            )
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    pt_inputs_dict[name] = key
                elif name == "input_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                elif name == "pixel_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                elif name == "input_features":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
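            # (Integer inputs are cast to torch.long above because PyTorch embedding
            # lookups require int64 indices; audio and image inputs stay float32.)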

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)

            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()

            tf_nans = np.copy(np.isnan(tf_hidden_states))
            pt_nans = np.copy(np.isnan(pt_hidden_states))

            pt_hidden_states[tf_nans] = 0
            tf_hidden_states[tf_nans] = 0
            pt_hidden_states[pt_nans] = 0
            tf_hidden_states[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
            self.assertLessEqual(max_diff, 4e-2)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    key = np.array(key, dtype=bool)
                    pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
                elif name == "input_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                elif name == "pixel_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                elif name == "input_features":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
            tf_nans = np.copy(np.isnan(tfo))
            pt_nans = np.copy(np.isnan(pto))

            pto[tf_nans] = 0
            tfo[tf_nans] = 0
            pto[pt_nans] = 0
            tfo[pt_nans] = 0

            max_diff = np.amax(np.abs(tfo - pto))
            self.assertLessEqual(max_diff, 4e-2)

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        max_input = getattr(self.model_tester, "max_position_embeddings", 512)
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")

        for model_class in self.all_model_classes:
            if model_class.__name__ in ["TFSpeech2TextModel", "TFSpeech2TextForConditionalGeneration"]:
                inputs = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, max_input),
                        name="decoder_input_ids",
                        dtype="int32",
                    ),
                    "input_features": tf.keras.Input(
                        batch_shape=(
                            2,
                            max_input,
                            self.model_tester.input_feat_per_channel * self.model_tester.input_channels,
                        ),
                        name="input_features",
                        dtype="float32",
                    ),
                }
            elif self.is_encoder_decoder:
                inputs = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, max_input),
                        name="decoder_input_ids",
                        dtype="int32",
                    ),
                    "input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
                }
            # `pixel_values` implies that the input is an image
            elif model_class.main_input_name == "pixel_values":
                inputs = tf.keras.Input(
                    batch_shape=(
                        3,
                        self.model_tester.num_channels,
                        self.model_tester.image_size,
                        self.model_tester.image_size,
                    ),
                    name="pixel_values",
                    dtype="float32",
                )
            elif model_class.__name__ in ["TFCLIPModel"]:
                inputs = {
                    "input_ids": tf.keras.Input(batch_shape=(3, max_input), name="input_ids", dtype="int32"),
                    "pixel_values": tf.keras.Input(
                        batch_shape=(
                            3,
                            self.model_tester.vision_model_tester.num_channels,
                            self.model_tester.vision_model_tester.image_size,
                            self.model_tester.vision_model_tester.image_size,
                        ),
                        name="pixel_values",
                        dtype="float32",
                    ),
                }
            elif model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
            else:
                inputs = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")

            # Prepare our model
            model = model_class(config)
            model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
            # Let's load it from disk to be sure we can use pretrained weights
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)

            outputs_dict = model(inputs)
            hidden_states = outputs_dict[0]

            # Add a dense layer on top to test integration with other keras modules
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)

            # Compile extended model
            extended_model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(min(out_len % 2, out_len % 5), 0)  # differentiation due to newly added cross_attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
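
    # Attention tensors use the (batch_size, num_heads, query_length, key_length) layout,
    # which is why the helpers above only check shape[-3:].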

    def test_headmasking(self):
        if not self.test_head_masking:
            return

        random.seed(42)  # seed the global RNG so config and input preparation are reproducible
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        random.seed()  # re-randomize the global RNG for subsequent tests

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Prepare head_mask
            def prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
                if i == 0:
                    return tf.concat(
                        (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0
                    )
                elif i == num_hidden_layers - 1:
                    return tf.concat(
                        (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0
                    )
                else:
                    return tf.ones(attention_heads, dtype=tf.float32)

            head_mask = tf.stack(
                [
                    prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
                    for i in range(config.num_hidden_layers)
                ],
                0,
            )

            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.call)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary differentiation because of T5 model
                    inputs["decoder_head_mask"] = head_mask
                if "cross_attn_head_mask" in arg_names:
                    inputs["cross_attn_head_mask"] = head_mask

            outputs = model(**inputs, return_dict=True)

            def check_attentions_validity(attentions):
                # Check that NaNs are not too frequent, then zero them out below
                for t in attentions:
                    self.assertLess(
                        (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy()
                    )  # Check we don't have more than 25% nans (arbitrary)

                attentions = [
                    tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions
                ]  # remove them (the test is less complete)

                self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0)
                self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0)
                self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0)
                self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0)

            if model.config.is_encoder_decoder:
                check_attentions_validity(outputs.encoder_attentions)
                check_attentions_validity(outputs.decoder_attentions)
                if "cross_attn_head_mask" in arg_names:
                    check_attentions_validity(outputs.cross_attentions)
            else:
                check_attentions_validity(outputs.attentions)
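
    # head_mask semantics (for reference): 1.0 keeps a head and 0.0 zeroes its attention
    # weights, so a per-layer mask like tf.constant([0.0, 1.0, 1.0, 1.0]) would disable
    # only the first of four heads; the assertions above verify exactly that pattern.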

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_hidden_states_output(config, inputs_dict, model_class):
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )

            if model.config.is_encoder_decoder:
                encoder_hidden_states = outputs.encoder_hidden_states
                decoder_hidden_states = outputs.decoder_hidden_states

                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(encoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(encoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(decoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(decoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
            else:
                hidden_states = outputs.hidden_states
                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        text_in_text_out_models = (
            get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
            + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING)
            + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
        )
        speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING)

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in text_in_text_out_models:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            elif model_class in speech_in_text_out_models:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            first, second = (
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
            )
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_model_outputs_equivalence(self):

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )
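
    # (With return_dict=True, models return ModelOutput instances whose .to_tuple() yields
    # the non-None fields in declaration order, which is what lets check_equivalence zip
    # them against the positional tuple outputs.)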

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(inputs_dict)

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
            else:
                inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)

            inputs = self._prepare_for_class(inputs, model_class)

            model(inputs)

    def test_numpy_arrays_inputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np)
            output_for_kw_input = model(**inputs_np)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def test_resize_token_embeddings(self):
        if not self.test_resize_embeddings:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            embeds = getattr(embedding_layer, "weight", None)
            if embeds is not None:
                return embeds

            embeds = getattr(embedding_layer, "decoder", None)
            if embeds is not None:
                return embeds

            model(model.dummy_inputs)

            embeds = getattr(embedding_layer, "weight", None)
            if embeds is not None:
                return embeds

            embeds = getattr(embedding_layer, "decoder", None)
            if embeds is not None:
                return embeds

            return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_bias = model.get_bias()
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_bias = model.get_bias()
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_bias is not None and new_bias is not None:
                    for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
                        self.assertEqual(new_weight.shape[0], assert_size)

                        models_equal = True
                        for p1, p2 in zip(old_weight.value(), new_weight.value()):
                            if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                                models_equal = False
                        self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
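
    # (resize_token_embeddings copies the first min(old_size, new_size) rows of the old
    # embedding matrix into the new one, so the element-wise comparisons above hold whether
    # the vocabulary is grown or shrunk.)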

    def test_lm_head_model_random_no_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined model needs input_ids
                with self.assertRaises(ValueError):
                    model.generate(do_sample=True, max_length=5)
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True))
            elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]:
                # Models with non-text inputs won't work here; num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5))

            with self.assertRaises(ValueError):
                # generating multiple sequences without sampling or beam search
                # is not allowed, as it would always produce the same sequences
                model.generate(input_ids, do_sample=False, num_return_sequences=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
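
    # (bad_words_ids is a list of token-id sequences that generate() must never emit; only
    # the freshly generated suffix is checked above, hence the slice at input_ids.shape[-1].)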

    def test_lm_head_model_no_beam_search_generate_dict_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)
        if input_ids is None:
            input_ids = inputs_dict.get("input_features", None)

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            output_greedy = model.generate(
                input_ids,
                do_sample=False,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )
            output_sample = model.generate(
                input_ids,
                do_sample=True,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput)
                self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput)
                self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput)

    def test_lm_head_model_random_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined model needs input_ids, num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))

            with self.assertRaises(AssertionError):
                # generating more return sequences than there are beams is not possible
                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(
                model.generate(
                    input_ids,
                    do_sample=True,
                    num_beams=2,
                    num_return_sequences=2,
                )
            )
            # num_return_sequences > 1, greedy
            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

    def test_lm_head_model_beam_search_generate_dict_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)
        if input_ids is None:
            input_ids = inputs_dict.get("input_features", None)

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            output_beam_search = model.generate(
                input_ids,
                num_beams=2,
                do_sample=False,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )
            output_beam_sample = model.generate(
                input_ids,
                num_beams=2,
                do_sample=True,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput)
                self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput)
                self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput)

    def test_loss_computation(self):
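        # models exposing `hf_compute_loss` should produce a loss of the expected size whether the
        # inputs are passed as keyword arguments, as a single dict, or as a positional tuple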
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
                ]
                loss_size = tf.size(added_label)

                if model.__class__ in get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING):
                    # the causal lm loss shifts the labels, so one label per batch item is cut off
                    loss_size = loss_size - self.model_tester.batch_size

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                possible_input_names = {"input_ids", "pixel_values", "input_features"}
                input_name = possible_input_names.intersection(set(prepared_for_class)).pop()
                model_input = prepared_for_class.pop(input_name)

                loss = model(model_input, **prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: input_name}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertEqual(loss.shape, [loss_size])

    def test_generate_with_headmasking(self):
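        # an all-zero head mask passed to `generate` should zero out the corresponding attention
        # weights (only encoder-decoder models implement all three mask arguments)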
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            # We want to test only encoder-decoder models
            if not config.is_encoder_decoder:
                continue

            head_masking = {
                "head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)),
                "decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
                "cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
            }

            signature = inspect.signature(model.call)
            # skip models whose `call` signature does not accept all the head masking arguments
            if not set(head_masking.keys()) <= set(signature.parameters.keys()):
                continue

            for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
                out = model.generate(
                    inputs_dict["input_ids"],
                    num_beams=1,
                    max_length=inputs_dict["input_ids"].shape[-1] + 5,
                    output_attentions=True,
                    return_dict_in_generate=True,
                    **{name: mask},
                )
                # We check the state of decoder_attentions and cross_attentions just from the last step
                attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
                self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0)

    def test_load_with_mismatched_shapes(self):
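        # loading a checkpoint into a config with different head/embedding shapes should raise,
        # unless `ignore_mismatched_sizes=True` is passed (with a warning about the mismatch)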
        if not self.test_mismatched_shapes:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                continue

            with self.subTest(msg=f"Testing {model_class}"):
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model = model_class(config)
                    inputs = self._prepare_for_class(inputs_dict, model_class)
                    _ = model(**inputs)
                    model.save_pretrained(tmp_dir)

                    # Fails when we don't set ignore_mismatched_sizes=True
                    with self.assertRaises(ValueError):
                        new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
                    with self.assertRaises(ValueError):
                        new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10)

                    logger = logging.get_logger("transformers.modeling_tf_utils")
                    with CaptureLogger(logger) as cl:
                        new_model = TFAutoModelForSequenceClassification.from_pretrained(
                            tmp_dir, num_labels=42, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)

                    logits = new_model(**inputs).logits
                    self.assertEqual(logits.shape[1], 42)

                    with CaptureLogger(logger) as cl:
                        new_model_without_prefix = TFAutoModel.from_pretrained(
                            tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)

                    # Although TF models always have a prefix pointing to `MainLayer`,
                    # we still add this "without prefix" test to keep consistency between the TF and PT tests.
                    input_ids = ids_tensor((2, 8), 10)
                    if self.is_encoder_decoder:
                        new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
                    else:
                        new_model_without_prefix(input_ids)

    def test_model_main_input_name(self):
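        # `main_input_name` is used throughout the library, so it must stay in sync with the
        # actual `call` signature of each model class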
        for model_class in self.all_model_classes:
            model_signature = inspect.signature(getattr(model_class, "call"))
            # The main input is the name of the argument after `self`
            observed_main_input_name = list(model_signature.parameters.keys())[1]
            self.assertEqual(model_class.main_input_name, observed_main_input_name)

    def _generate_random_bad_tokens(self, num_bad_tokens, model):
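        # helper: draw `num_bad_tokens` random non-special token ids to use as `bad_words_ids`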
        # special tokens cannot be bad tokens
        special_tokens = []
        if model.config.bos_token_id is not None:
            special_tokens.append(model.config.bos_token_id)
        if model.config.pad_token_id is not None:
            special_tokens.append(model.config.pad_token_id)
        if model.config.eos_token_id is not None:
            special_tokens.append(model.config.eos_token_id)

        # create random bad tokens that are not special tokens
        bad_tokens = []
        while len(bad_tokens) < num_bad_tokens:
            token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

    def _check_generated_ids(self, output_ids):
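        # helper: every generated token id must be a valid index into the vocabulary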
        for token_id in output_ids[0].numpy().tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

    def _check_match_tokens(self, generated_ids, bad_words_ids):
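        # helper: returns True if any bad word sequence appears as a contiguous slice of a
        # generated sequence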
        # for all bad word tokens
        for bad_word_ids in bad_words_ids:
            # for all slices in batch
            for generated_ids_slice in generated_ids:
                # for all word idx
                for i in range(len(bad_word_ids), len(generated_ids_slice)):
                    # if tokens match
                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
                        return True
        return False


def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)

    return output


def random_attention_mask(shape, rng=None, name=None, dtype=None):
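    """Creates a random 0/1 attention mask of the given shape, attending to at least one token per row."""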
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name, dtype=dtype)
    # make sure that at least one token is attended to for each batch
    attn_mask = tf.concat([tf.constant(value=1, shape=(shape[0], 1), dtype=dtype), attn_mask[:, 1:]], axis=1)
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)


@require_tf
class UtilsFunctionsTest(unittest.TestCase):

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
@is_staging_test
class TFModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = login(username=USER, password=PASS)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, name="test-model-tf")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, name="test-model-tf-org", organization="valid_org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
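        # push a freshly initialized model to the Hub, reload it, and check that the weights match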
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = TFBertModel(config)
        # Make sure model is properly initialized
        _ = model(model.dummy_inputs)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, "test-model-tf"), push_to_hub=True, use_auth_token=self._token)

            new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf")
            models_equal = True
            for p1, p2 in zip(model.weights, new_model.weights):
                if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                    models_equal = False
            self.assertTrue(models_equal)

    def test_push_to_hub_with_model_card(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = TFBertModel(config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.push_to_hub(os.path.join(tmp_dir, "test-model-card-tf"))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "test-model-card-tf", "README.md")))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = TFBertModel(config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                os.path.join(tmp_dir, "test-model-tf-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org")
            models_equal = True
            for p1, p2 in zip(model.weights, new_model.weights):
                if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                    models_equal = False
            self.assertTrue(models_equal)