# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
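
# Common machinery for the per-model TF test files: concrete TF model tests
# inherit `TFModelTesterMixin` below and run these shared checks.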

import copy
import inspect
import json
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple

from huggingface_hub import delete_repo, login
from requests.exceptions import HTTPError
from transformers import is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
    PASS,
    USER,
    CaptureLogger,
    _tf_gpu_memory_limit,
    is_pt_tf_cross_test,
    is_staging_test,
    require_keras2onnx,
    require_tf,
    slow,
    tooslow,
)
from transformers.utils import logging


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        BertConfig,
        TFAutoModel,
        TFAutoModelForSequenceClassification,
        TFBertModel,
        TFSharedEmbeddings,
        tf_top_k_top_p_filtering,
    )
    from transformers.generation_tf_utils import (
        TFBeamSampleDecoderOnlyOutput,
        TFBeamSampleEncoderDecoderOutput,
        TFBeamSearchDecoderOnlyOutput,
        TFBeamSearchEncoderDecoderOutput,
        TFGreedySearchDecoderOnlyOutput,
        TFGreedySearchEncoderDecoderOutput,
        TFSampleDecoderOnlyOutput,
        TFSampleEncoderDecoderOutput,
    )

    if _tf_gpu_memory_limit is not None:
        gpus = tf.config.list_physical_devices("GPU")
        for gpu in gpus:
            # Restrict TensorFlow to only allocate x GB of memory on the GPUs
            try:
                tf.config.set_logical_device_configuration(
                    gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
                )
                logical_gpus = tf.config.list_logical_devices("GPU")
                print("Logical GPUs", logical_gpus)
            except RuntimeError as e:
                # Virtual devices must be set before GPUs have been initialized
                print(e)


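# Return a copy of `config` with all initializer ranges/stds set to 0 so that
# freshly initialized weights are deterministic (used to keep tests nan-free).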
def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key:
            setattr(configs_no_init, key, 0.0)
    return configs_no_init


@require_tf
class TFModelTesterMixin:
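    """Mixin gathering the checks shared by all TF model classes.

    Concrete test classes inherit from this mixin together with
    ``unittest.TestCase`` and fill in ``model_tester`` and
    ``all_model_classes``. A typical subclass looks roughly like this
    (a sketch, names follow the per-model test files):

        @require_tf
        class TFBertModelTest(TFModelTesterMixin, unittest.TestCase):
            all_model_classes = (TFBertModel,) if is_tf_available() else ()

            def setUp(self):
                self.model_tester = TFBertModelTester(self)
    """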

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_mismatched_shapes = True
    test_resize_embeddings = True
    test_head_masking = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
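        # Tile every tensor input along a num_choices axis for multiple-choice
        # heads and, when return_labels is True, add dummy labels with the
        # shape each task head expects.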
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
                *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
                *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
                *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
                *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
            ]:
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
                )

        return inputs_dict

    def test_initialization(self):
        pass

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))

                self.assert_outputs_same(after_outputs, outputs)

    def test_save_load_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            new_model = model_class.from_config(model.get_config())
            _ = new_model(self._prepare_for_class(inputs_dict, model_class))  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class))

            self.assert_outputs_same(after_outputs, outputs)

    @tooslow
    def test_graph_mode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    @tooslow
    def test_xla_mode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @tf.function(experimental_compile=True)
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                ]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else []
                )
                # Necessary to handle BART with newly added cross_attn_head_mask
                expected_arg_names.extend(
                    ["cross_attn_head_mask", "encoder_outputs"]
                    if "cross_attn_head_mask" in arg_names
                    else ["encoder_outputs"]
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    @tooslow
    def test_saved_model_creation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = False
        config.output_attentions = False

        if hasattr(config, "use_cache"):
            config.use_cache = False

        model_class = self.all_model_classes[0]

        class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
        model = model_class(config)

        model(class_inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=True)
            saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
            self.assertTrue(os.path.exists(saved_model_dir))

    @tooslow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_onnx_compliancy(self):
        if not self.test_onnx:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        INTERNAL_OPS = [
            "Assert",
            "AssignVariableOp",
            "EmptyTensorList",
            "ReadVariableOp",
            "ResourceGather",
            "TruncatedNormal",
            "VarHandleOp",
            "VarIsInitializedOp",
        ]
        onnx_ops = []

        with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
            onnx_opsets = json.load(f)["opsets"]

        for i in range(1, self.onnx_min_opset + 1):
            onnx_ops.extend(onnx_opsets[str(i)])

        for model_class in self.all_model_classes:
            model_op_names = set()

            with tf.Graph().as_default() as g:
                model = model_class(config)
                model(model.dummy_inputs)

                for op in g.get_operations():
                    model_op_names.add(op.node_def.op)

            model_op_names = sorted(model_op_names)
            incompatible_ops = []

            for op in model_op_names:
                if op not in onnx_ops and op not in INTERNAL_OPS:
                    incompatible_ops.append(op)

            self.assertEqual(len(incompatible_ops), 0, incompatible_ops)

    @require_keras2onnx
    @slow
    def test_onnx_runtime_optimize(self):
        if not self.test_onnx:
            return

        import keras2onnx
        import onnxruntime

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model(model.dummy_inputs)

            onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=self.onnx_min_opset)

            onnxruntime.InferenceSession(onnx_model.SerializeToString())

    @tooslow
    def test_mixed_precision(self):
        tf.keras.mixed_precision.experimental.set_policy("mixed_float16")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            outputs = model(class_inputs_dict)

            self.assertIsNotNone(outputs)

        tf.keras.mixed_precision.experimental.set_policy("float32")

    def test_keras_save_load(self):
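        # Every @keras_serializable MainLayer reachable from all_model_classes
        # must survive a tf.keras .h5 save/load round-trip with identical outputs.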
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )
        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    def assert_outputs_same(self, after_outputs, outputs):
        # Make sure we don't have nans
        if isinstance(after_outputs, tf.Tensor):
            out_1 = after_outputs.numpy()
        elif isinstance(after_outputs, dict):
            out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
        else:
            out_1 = after_outputs[0].numpy()
        out_2 = outputs[0].numpy()
        self.assertEqual(out_1.shape, out_2.shape)
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        import torch

        import transformers

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            config.output_hidden_states = True

            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
            )
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    pt_inputs_dict[name] = key
                elif name == "input_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                elif name == "pixel_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)

            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)

            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()

            # zero out any position that is nan in either framework before comparing
            tf_nans = np.copy(np.isnan(tf_hidden_states))
            pt_nans = np.copy(np.isnan(pt_hidden_states))

            pt_hidden_states[tf_nans] = 0
            tf_hidden_states[tf_nans] = 0
            pt_hidden_states[pt_nans] = 0
            tf_hidden_states[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
            self.assertLessEqual(max_diff, 4e-2)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = {}
            for name, key in self._prepare_for_class(inputs_dict, model_class).items():
                if type(key) == bool:
                    key = np.array(key, dtype=bool)
                    pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
                elif name == "input_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                elif name == "pixel_values":
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
                else:
                    pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)

            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()

            # zero out any position that is nan in either framework before comparing
            tf_nans = np.copy(np.isnan(tfo))
            pt_nans = np.copy(np.isnan(pto))

            pto[tf_nans] = 0
            tfo[tf_nans] = 0
            pto[pt_nans] = 0
            tfo[pt_nans] = 0

            max_diff = np.amax(np.abs(tfo - pto))
            self.assertLessEqual(max_diff, 4e-2)
    @tooslow
    def test_train_pipeline_custom_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # head_mask and decoder_head_mask have different shapes than the other input args
        if "head_mask" in inputs_dict:
            del inputs_dict["head_mask"]
        if "decoder_head_mask" in inputs_dict:
            del inputs_dict["decoder_head_mask"]
        if "cross_attn_head_mask" in inputs_dict:
            del inputs_dict["cross_attn_head_mask"]
        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )

        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
                config.use_cache = False
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            if hasattr(self.model_tester, "num_labels"):
                num_labels = self.model_tester.num_labels
            else:
                num_labels = 2

            X = tf.data.Dataset.from_tensor_slices(
                (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
            ).batch(1)

            hidden_states = main_layer(symbolic_inputs)[0]
            outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
            model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])

            model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
            model.fit(X, epochs=1)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                model(inputs_dict)

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        max_input = getattr(self.model_tester, "max_position_embeddings", 512)

        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")

        for model_class in self.all_model_classes:
            if self.is_encoder_decoder:
                inputs = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, max_input),
                        name="decoder_input_ids",
                        dtype="int32",
                    ),
                    "input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
                }
            # TODO: A better way to handle vision models
            elif model_class.__name__ in ["TFViTModel", "TFViTForImageClassification"]:
                inputs = tf.keras.Input(
                    batch_shape=(
                        3,
                        self.model_tester.num_channels,
                        self.model_tester.image_size,
                        self.model_tester.image_size,
                    ),
                    name="pixel_values",
                    dtype="float32",
                )
            elif model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
            else:
                inputs = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")

            # Prepare our model
            model = model_class(config)
            model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
            # Let's load it from the disk to be sure we can use pretrained weights
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)

            outputs_dict = model(inputs)
            hidden_states = outputs_dict[0]

            # Add a dense layer on top to test integration with other keras modules
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)

            # Compile extended model
            extended_model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            input_ids = inputs_keywords.pop("input_ids", None)
            if input_ids is None:
                input_ids = inputs_keywords.pop("pixel_values", None)
            outputs_keywords = model(input_ids, **inputs_keywords)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(min(out_len % 2, out_len % 5), 0)  # differentiation due to newly added cross_attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    def test_headmasking(self):
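        # Masks the first head of the first layer and the last head of the last
        # layer, then checks that the corresponding attention rows sum to ~0.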
        if not self.test_head_masking:
            return

        random.Random().seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        random.Random().seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Prepare head_mask
            def prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
                if i == 0:
                    return tf.concat(
                        (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0
                    )
                elif i == num_hidden_layers - 1:
                    return tf.concat(
                        (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0
                    )
                else:
                    return tf.ones(attention_heads, dtype=tf.float32)

            head_mask = tf.stack(
                [
                    prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
                    for i in range(config.num_hidden_layers)
                ],
                0,
            )

            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.call)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary differentiation because of T5 model
                    inputs["decoder_head_mask"] = head_mask
                if "cross_attn_head_mask" in arg_names:
                    inputs["cross_attn_head_mask"] = head_mask

            outputs = model(**inputs, return_dict=True)

            def check_attentions_validity(attentions):
                # Remove Nan
                for t in attentions:
                    self.assertLess(
                        (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy()
                    )  # Check we don't have more than 25% nans (arbitrary)

                attentions = [
                    tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions
                ]  # remove them (the test is less complete)

                self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0)
                self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0)
                self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0)
                self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0)

            if model.config.is_encoder_decoder:
                check_attentions_validity(outputs.encoder_attentions)
                check_attentions_validity(outputs.decoder_attentions)
                if "cross_attn_head_mask" in arg_names:
                    check_attentions_validity(outputs.cross_attentions)
            else:
                check_attentions_validity(outputs.attentions)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_hidden_states_output(config, inputs_dict, model_class):
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            if model.config.is_encoder_decoder:
                encoder_hidden_states = outputs.encoder_hidden_states
                decoder_hidden_states = outputs.decoder_hidden_states

                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(encoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(encoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(decoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(decoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
            else:
                hidden_states = outputs.hidden_states
                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_lm_models = (
            get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
            + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING)
            + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
        )

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)

            if model_class in list_lm_models:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_determinism(self):
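        # Two forward passes with training=False must yield identical outputs.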
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            first, second = (
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
            )
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def test_model_outputs_equivalence(self):
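        # Outputs returned as a tuple (return_dict=False) and as a ModelOutput
        # dict (return_dict=True) must match for every combination of flags.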
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(inputs_dict)

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
            else:
                inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)

            inputs = self._prepare_for_class(inputs, model_class)

            model(inputs)

    @tooslow
    def test_graph_mode_with_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(inputs_dict)

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
            else:
                inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)

            inputs = self._prepare_for_class(inputs, model_class)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_numpy_arrays_inputs(self):
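        # Models must accept plain numpy arrays wherever they accept tf.Tensors.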
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            model(inputs_np)

1046
1047
1048
1049
    def test_resize_token_embeddings(self):
        if not self.test_resize_embeddings:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
1050
1051

        def _get_word_embedding_weight(model, embedding_layer):
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
            embeds = getattr(embedding_layer, "weight", None)
            if embeds is not None:
                return embeds

            embeds = getattr(embedding_layer, "decoder", None)
            if embeds is not None:
                return embeds

            model(model.dummy_inputs)

            embeds = getattr(embedding_layer, "weight", None)
            if embeds is not None:
                return embeds

            embeds = getattr(embedding_layer, "decoder", None)
            if embeds is not None:
                return embeds

            return None
1071

1072
1073
1074
1075
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=config)
1076
1077
1078
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_bias = model.get_bias()
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
1079
                # reshape the embeddings
1080
1081
1082
1083
1084
1085
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_bias = model.get_bias()
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
1086
                assert_size = size if size is not None else config.vocab_size
1087
1088
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

1089
1090
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_bias is not None and new_bias is not None:
                    for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
                        self.assertEqual(new_weight.shape[0], assert_size)

                        models_equal = True
                        for p1, p2 in zip(old_weight.value(), new_weight.value()):
                            if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                                models_equal = False
                        self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
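
    # Hedged usage sketch of `resize_token_embeddings` (not collected by pytest):
    # resizing keeps the existing embedding rows and only adds or trims trailing
    # rows, which is exactly what the weight-comparison loops above verify.
    def _example_resize_sketch(self, model, extra_tokens=10):
        old_size = model.config.vocab_size
        model.resize_token_embeddings(old_size + extra_tokens)
        return model.get_input_embeddings()  # the first `old_size` rows are unchanged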

    def test_lm_head_model_random_no_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids
                with self.assertRaises(AssertionError):
                    model.generate(do_sample=True, max_length=5)
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5))

            with self.assertRaises(AssertionError):
                # generating multiple sequences when not sampling and not using beam search
                # is not allowed, as it would always generate the same sequences
                model.generate(input_ids, do_sample=False, num_return_sequences=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))

            # check language generation with bad-words tokens:
            # create a list with one 1-token bad-words sequence and one 2-token bad-words sequence
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
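
    # Sketch of the `bad_words_ids` contract verified above (hypothetical token ids,
    # not collected by pytest): each entry is a token-id sequence that must never
    # appear in the generated continuation.
    def _example_bad_words_sketch(self, model, input_ids):
        bad_words_ids = [[11], [12, 13]]  # one 1-token and one 2-token banned sequence
        return model.generate(input_ids, do_sample=True, bad_words_ids=bad_words_ids)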

    def test_lm_head_model_no_beam_search_generate_dict_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            output_greedy = model.generate(
                input_ids,
                do_sample=False,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )
            output_sample = model.generate(
                input_ids,
                do_sample=True,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput)
                self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput)
                self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput)

    def test_lm_head_model_random_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids; num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))

            with self.assertRaises(AssertionError):
                # generating more return sequences than there are beams is not possible
                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)

            # num_return_sequences > 1, sample
            self._check_generated_ids(
                model.generate(
                    input_ids,
                    do_sample=True,
                    num_beams=2,
                    num_return_sequences=2,
                )
            )
            # num_return_sequences > 1, greedy
            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))

            # check language generation with bad-words tokens:
            # create a list with one 1-token bad-words sequence and one 2-token bad-words sequence
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
            output_tokens = model.generate(
                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
            )
            # only count generated tokens
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

    def test_lm_head_model_beam_search_generate_dict_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict.get("input_ids", None)

        # iterate over all generative models
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            output_beam_search = model.generate(
                input_ids,
                num_beams=2,
                do_sample=False,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )
            output_beam_sample = model.generate(
                input_ids,
                num_beams=2,
                do_sample=True,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput)
                self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput)
                self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput)

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
                ]
                loss_size = tf.size(added_label)

                if model.__class__ in get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING):
                    # if the loss is a causal LM loss, the labels are shifted, so one
                    # label per batch is cut
                    loss_size = loss_size - self.model_tester.batch_size

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_name = "input_ids" if "input_ids" in prepared_for_class else "pixel_values"
                input_ids = prepared_for_class.pop(input_name)

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: input_name}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with the parameters' default values, update the label entries and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertEqual(loss.shape, [loss_size])
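
    # Compact sketch (not collected by pytest) of the equivalent calling conventions
    # exercised above; `ids` and `labels` are assumed tensors:
    def _example_loss_call_styles(self, model, ids, labels):
        loss_from_kwargs = model(ids, labels=labels)[0]
        loss_from_dict = model({"input_ids": ids, "labels": labels})[0]
        # a positional tuple matching the order of `model.call` also works, as the
        # tuple branch of the test above demonstrates
        return loss_from_kwargs, loss_from_dict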

    def test_generate_with_headmasking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            # We want to test only encoder-decoder models
            if not config.is_encoder_decoder:
                continue

            head_masking = {
                "head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)),
                "decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
                "cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
            }

            signature = inspect.signature(model.call)
            # skip models whose call signature does not accept all head-mask arguments
            if not set(head_masking.keys()).issubset(signature.parameters.keys()):
                continue

            for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
                out = model.generate(
                    inputs_dict["input_ids"],
                    num_beams=1,
                    max_length=inputs_dict["input_ids"].shape[-1] + 5,
                    output_attentions=True,
                    return_dict_in_generate=True,
                    **{name: mask},
                )
                # We check the state of decoder_attentions and cross_attentions just from the last step
                attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
                self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0)
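
    # Shape convention for the head masks asserted above (illustrative sketch, not
    # collected by pytest): one row per layer, one column per attention head, and a
    # zero entry disables that head entirely.
    def _example_head_mask_sketch(self, num_layers=2, num_heads=4):
        keep_all_heads = tf.ones((num_layers, num_heads))
        drop_first_head = tf.concat(
            [tf.zeros((num_layers, 1)), tf.ones((num_layers, num_heads - 1))], axis=-1
        )
        return keep_all_heads, drop_first_head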

    def test_load_with_mismatched_shapes(self):
        if not self.test_mismatched_shapes:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                continue

            with self.subTest(msg=f"Testing {model_class}"):
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model = model_class(config)
                    inputs = self._prepare_for_class(inputs_dict, model_class)
                    _ = model(**inputs)
                    model.save_pretrained(tmp_dir)

                    # Fails when we don't set ignore_mismatched_sizes=True
                    with self.assertRaises(ValueError):
                        new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
                    with self.assertRaises(ValueError):
                        new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10)

                    logger = logging.get_logger("transformers.modeling_tf_utils")
                    with CaptureLogger(logger) as cl:
                        new_model = TFAutoModelForSequenceClassification.from_pretrained(
                            tmp_dir, num_labels=42, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)

                    logits = new_model(**inputs).logits
                    self.assertEqual(logits.shape[1], 42)

                    with CaptureLogger(logger) as cl:
                        new_model_without_prefix = TFAutoModel.from_pretrained(
                            tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)

                    # Although TF models always have a prefix pointing to `MainLayer`,
                    # we still add this "without prefix" test to keep consistency between the TF and PT tests.
                    input_ids = ids_tensor((2, 8), 10)
                    if self.is_encoder_decoder:
                        new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
                    else:
                        new_model_without_prefix(input_ids)
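
    # Hedged usage sketch of `ignore_mismatched_sizes` (assumes `ckpt_dir` holds a
    # saved checkpoint; not collected by pytest): mismatched head weights are
    # dropped and freshly initialized instead of raising a ValueError.
    def _example_mismatched_load_sketch(self, ckpt_dir):
        return TFAutoModelForSequenceClassification.from_pretrained(
            ckpt_dir, num_labels=42, ignore_mismatched_sizes=True
        )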

    def _generate_random_bad_tokens(self, num_bad_tokens, model):
        # special tokens cannot be bad tokens
        special_tokens = []
        if model.config.bos_token_id is not None:
            special_tokens.append(model.config.bos_token_id)
        if model.config.pad_token_id is not None:
            special_tokens.append(model.config.pad_token_id)
        if model.config.eos_token_id is not None:
            special_tokens.append(model.config.eos_token_id)

        # create random bad tokens that are not special tokens
        bad_tokens = []
        while len(bad_tokens) < num_bad_tokens:
            token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

    def _check_generated_ids(self, output_ids):
        for token_id in output_ids[0].numpy().tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

    def _check_match_tokens(self, generated_ids, bad_words_ids):
        # for all bad word tokens
        for bad_word_ids in bad_words_ids:
            # for all slices in batch
            for generated_ids_slice in generated_ids:
                # for all word idx
                for i in range(len(bad_word_ids), len(generated_ids_slice)):
                    # if tokens match
                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
                        return True
        return False
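
    # Sketch of the sliding-window check above on concrete values (illustrative,
    # not collected by pytest): the banned bigram [1, 2] occurs inside the generated
    # slice, so the helper returns True.
    def _example_match_tokens_sketch(self):
        return self._check_match_tokens([[5, 1, 2, 9]], [[1, 2]])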


def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)

    return output


def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
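

# Quick usage sketch for the two helpers above (shapes and sizes are arbitrary):
# `ids_tensor` draws int32 ids in [0, vocab_size), `floats_tensor` draws float32
# values in [0, scale).
def _example_random_inputs():
    input_ids = ids_tensor((2, 8), vocab_size=99)
    features = floats_tensor((2, 8, 4), scale=0.5)
    return input_ids, features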


@require_tf
class UtilsFunctionsTest(unittest.TestCase):

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
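

# A tiny end-to-end sketch of the same filtering on assumed values: with top_k=2 and
# top_p=1.0 only the two largest logits stay finite; everything else becomes -inf,
# so sampling can only ever pick those two tokens.
def _example_top_k_filtering():
    logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
    return tf_top_k_top_p_filtering(logits, top_k=2, top_p=1.0)  # [[-inf, -inf, 3.0, 4.0]]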


@require_tf
@is_staging_test
class TFModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = login(username=USER, password=PASS)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, name="test-model-tf")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, name="test-model-tf-org", organization="valid_org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = TFBertModel(config)
        # Make sure model is properly initialized
        _ = model(model.dummy_inputs)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, "test-model-tf"), push_to_hub=True, use_auth_token=self._token)

            new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf")
            models_equal = True
            for p1, p2 in zip(model.weights, new_model.weights):
                if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                    models_equal = False
            self.assertTrue(models_equal)

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = TFBertModel(config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                os.path.join(tmp_dir, "test-model-tf-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org")
            models_equal = True
            for p1, p2 in zip(model.weights, new_model.weights):
                if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                    models_equal = False
            self.assertTrue(models_equal)