test_modeling_tf_common.py 46.3 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Aymeric Augustin's avatar
Aymeric Augustin committed
15

thomwolf's avatar
thomwolf committed
16
17

import copy
18
import inspect
Aymeric Augustin's avatar
Aymeric Augustin committed
19
import os
thomwolf's avatar
thomwolf committed
20
import random
Aymeric Augustin's avatar
Aymeric Augustin committed
21
import tempfile
22
import unittest
23
from importlib import import_module
24
from typing import List, Tuple
thomwolf's avatar
thomwolf committed
25

26
27
from transformers import is_tf_available
from transformers.testing_utils import _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, slow
28

Aymeric Augustin's avatar
Aymeric Augustin committed
29

30
if is_tf_available():
thomwolf's avatar
thomwolf committed
31
    import numpy as np
32
    import tensorflow as tf
33

34
    from transformers import (
35
36
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
37
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
38
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
39
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
40
41
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
42
43
44
        TFAdaptiveEmbedding,
        TFSharedEmbeddings,
        tf_top_k_top_p_filtering,
45
    )
46

Julien Chaumond's avatar
Julien Chaumond committed
47
48
49
50
51
52
53
54
55
56
57
58
59
    if _tf_gpu_memory_limit is not None:
        gpus = tf.config.list_physical_devices("GPU")
        for gpu in gpus:
            # Restrict TensorFlow to only allocate x GB of memory on the GPUs
            try:
                tf.config.experimental.set_virtual_device_configuration(
                    gpu, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
                )
                logical_gpus = tf.config.experimental.list_logical_devices("GPU")
                print("Logical GPUs", logical_gpus)
            except RuntimeError as e:
                # Virtual devices must be set before GPUs have been initialized
                print(e)
thomwolf's avatar
thomwolf committed
60

61

thomwolf's avatar
thomwolf committed
62
63
64
def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
65
        if "_range" in key or "_std" in key:
thomwolf's avatar
thomwolf committed
66
67
68
69
            setattr(configs_no_init, key, 0.0)
    return configs_no_init


70
71
@require_tf
class TFModelTesterMixin:
72

73
74
    model_tester = None
    all_model_classes = ()
75
    all_generative_model_classes = ()
76
77
    test_resize_embeddings = True
    is_encoder_decoder = False
78

Lysandre Debut's avatar
Lysandre Debut committed
79
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
80
81
        inputs_dict = copy.deepcopy(inputs_dict)

82
        if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
83
            inputs_dict = {
84
85
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
86
87
88
                else v
                for k, v in inputs_dict.items()
            }
89
90
91

        if return_labels:
            if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
92
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
93
            elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
94
95
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
96
            elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
97
98
99
100
101
102
103
104
105
106
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [
                *TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
                *TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),
                *TF_MODEL_FOR_MASKED_LM_MAPPING.values(),
                *TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
            ]:
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
                )
107
108
        return inputs_dict

109
110
    def test_initialization(self):
        pass
111

112
113
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
114

115
116
        for model_class in self.all_model_classes:
            model = model_class(config)
117
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
118

119
            with tempfile.TemporaryDirectory() as tmpdirname:
120
121
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
122
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
123

124
                self.assert_outputs_same(after_outputs, outputs)
125

126
127
128
129
130
131
132
133
134
135
136
137
138
    def test_graph_mode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "inputs",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                    "encoder_outputs",
                ]
                self.assertListEqual(arg_names[:5], expected_arg_names)

            else:
                expected_arg_names = ["inputs"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

Julien Plu's avatar
Julien Plu committed
162
163
164
165
166
167
    @slow
    def test_saved_model_with_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True

        for model_class in self.all_model_classes:
Lysandre Debut's avatar
Lysandre Debut committed
168
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
Julien Plu's avatar
Julien Plu committed
169
            model = model_class(config)
Lysandre Debut's avatar
Lysandre Debut committed
170
            num_out = len(model(class_inputs_dict))
Julien Plu's avatar
Julien Plu committed
171
            model._saved_model_inputs_spec = None
Lysandre Debut's avatar
Lysandre Debut committed
172
            model._set_save_spec(class_inputs_dict)
Julien Plu's avatar
Julien Plu committed
173
174
175
176

            with tempfile.TemporaryDirectory() as tmpdirname:
                tf.saved_model.save(model, tmpdirname)
                model = tf.keras.models.load_model(tmpdirname)
Lysandre Debut's avatar
Lysandre Debut committed
177
                outputs = model(class_inputs_dict)
178
179
180
181
182
183

                if self.is_encoder_decoder:
                    output = outputs["encoder_hidden_states"] if isinstance(outputs, dict) else outputs[-1]
                else:
                    output = outputs["hidden_states"] if isinstance(outputs, dict) else outputs[-1]

Sylvain Gugger's avatar
Sylvain Gugger committed
184
                hidden_states = [t.numpy() for t in output]
Julien Plu's avatar
Julien Plu committed
185
                self.assertEqual(len(outputs), num_out)
Lysandre Debut's avatar
Lysandre Debut committed
186
187
188
189
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(hidden_states), expected_num_layers)
Julien Plu's avatar
Julien Plu committed
190
                self.assertListEqual(
Lysandre's avatar
Lysandre committed
191
192
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
Julien Plu's avatar
Julien Plu committed
193
194
195
196
197
198
                )

    @slow
    def test_saved_model_with_attentions_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_attentions = True
Lysandre Debut's avatar
Lysandre Debut committed
199
200
201

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
Julien Plu's avatar
Julien Plu committed
202
203

        for model_class in self.all_model_classes:
Lysandre Debut's avatar
Lysandre Debut committed
204
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
Julien Plu's avatar
Julien Plu committed
205
            model = model_class(config)
Lysandre Debut's avatar
Lysandre Debut committed
206
            num_out = len(model(class_inputs_dict))
Julien Plu's avatar
Julien Plu committed
207
            model._saved_model_inputs_spec = None
Lysandre Debut's avatar
Lysandre Debut committed
208
            model._set_save_spec(class_inputs_dict)
Julien Plu's avatar
Julien Plu committed
209
210
211
212

            with tempfile.TemporaryDirectory() as tmpdirname:
                tf.saved_model.save(model, tmpdirname)
                model = tf.keras.models.load_model(tmpdirname)
Lysandre Debut's avatar
Lysandre Debut committed
213
                outputs = model(class_inputs_dict)
214
215
216
217
218
219

                if self.is_encoder_decoder:
                    output = outputs["encoder_attentions"] if isinstance(outputs, dict) else outputs[-1]
                else:
                    output = outputs["attentions"] if isinstance(outputs, dict) else outputs[-1]

Sylvain Gugger's avatar
Sylvain Gugger committed
220
                attentions = [t.numpy() for t in output]
Julien Plu's avatar
Julien Plu committed
221
222
223
224
225
226
227
                self.assertEqual(len(outputs), num_out)
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

228
229
230
231
232
233
234
235
    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
236
            if module_member_name.endswith("MainLayer")
237
            for module_member in (getattr(module, module_member_name),)
238
239
240
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
241
242
        )
        for main_layer_class in tf_main_layer_classes:
Julien Plu's avatar
Julien Plu committed
243
244
245
246
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values than in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
247
                config.use_cache = False
Julien Plu's avatar
Julien Plu committed
248
249
250
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)
251
252
253
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
Julien Plu's avatar
Julien Plu committed
254

255
256
257
258
259
260
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
Julien Plu's avatar
Julien Plu committed
261
262
263
264
265
266
267
268
269
270
271
272
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
273
274
275
276
277
278
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    def assert_outputs_same(self, after_outputs, outputs):
        # Make sure we don't have nans
Julien Plu's avatar
Julien Plu committed
279
280
        if isinstance(after_outputs, tf.Tensor):
            out_1 = after_outputs.numpy()
Sylvain Gugger's avatar
Sylvain Gugger committed
281
282
        elif isinstance(after_outputs, dict):
            out_1 = after_outputs[list(after_outputs.keys())[0]]
Julien Plu's avatar
Julien Plu committed
283
284
        else:
            out_1 = after_outputs[0].numpy()
285
        out_2 = outputs[0].numpy()
286
        self.assertEqual(out_1.shape, out_2.shape)
287
288
289
290
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)
291

292
    @is_pt_tf_cross_test
293
    def test_pt_tf_model_equivalence(self):
thomwolf's avatar
thomwolf committed
294

295
        import torch
296

297
        import transformers
thomwolf's avatar
thomwolf committed
298

299
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
thomwolf's avatar
thomwolf committed
300

301
        for model_class in self.all_model_classes:
302
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
303
            pt_model_class = getattr(transformers, pt_model_class_name)
thomwolf's avatar
thomwolf committed
304

305
            config.output_hidden_states = True
306

307
308
            tf_model = model_class(config)
            pt_model = pt_model_class(config)
thomwolf's avatar
thomwolf committed
309

310
            # Check we can load pt model in tf and vice-versa with model => model functions
311

312
313
314
            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
            )
315
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
316

317
318
319
            # Check predictions on first output (logits/hidden-states) are close enought given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = dict(
320
321
                (name, torch.from_numpy(key.numpy()).to(torch.long))
                for name, key in self._prepare_for_class(inputs_dict, model_class).items()
322
            )
323
324
325
326
            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

327
328
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
329
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
330
331
            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()
Lysandre's avatar
Lysandre committed
332

333
334
335
336
337
338
339
            tf_nans = np.copy(np.isnan(tf_hidden_states))
            pt_nans = np.copy(np.isnan(pt_hidden_states))

            pt_hidden_states[tf_nans] = 0
            tf_hidden_states[tf_nans] = 0
            pt_hidden_states[pt_nans] = 0
            tf_hidden_states[pt_nans] = 0
Lysandre's avatar
Lysandre committed
340

341
            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
342
            # Debug info (remove when fixed)
343
            if max_diff >= 4e-2:
344
345
346
347
348
                print("===")
                print(model_class)
                print(config)
                print(inputs_dict)
                print(pt_inputs_dict)
349
            self.assertLessEqual(max_diff, 4e-2)
350
351

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
352
            with tempfile.TemporaryDirectory() as tmpdirname:
353
354
355
356
357
358
359
360
361
362
363
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check predictions on first output (logits/hidden-states) are close enought given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = dict(
364
365
                (name, torch.from_numpy(key.numpy()).to(torch.long))
                for name, key in self._prepare_for_class(inputs_dict, model_class).items()
366
            )
367
368
369
370
            # need to rename encoder-decoder "inputs" for PyTorch
            if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
                pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")

371
372
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
373
            tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
374
375
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
376
377
378
379
380
381
382
383
            tf_nans = np.copy(np.isnan(tfo))
            pt_nans = np.copy(np.isnan(pto))

            pto[tf_nans] = 0
            tfo[tf_nans] = 0
            pto[pt_nans] = 0
            tfo[pt_nans] = 0

384
            max_diff = np.amax(np.abs(tfo - pto))
sgugger's avatar
sgugger committed
385
            self.assertLessEqual(max_diff, 4e-2)
386

387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
    def test_train_pipeline_custom_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )

        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values than in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
                config.use_cache = False
                main_layer = main_layer_class(config, embed_tokens=shared)
                del inputs_dict["use_cache"]
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            if hasattr(self.model_tester, "num_labels"):
                num_labels = self.model_tester.num_labels
            else:
                num_labels = 2

            X = tf.data.Dataset.from_tensor_slices(
                (inputs_dict, np.random.randint(0, num_labels, (self.model_tester.batch_size, 1)))
            ).batch(1)

            hidden_states = main_layer(symbolic_inputs)[0]
            outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
            model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])

            model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["acc"])
            model.fit(X, epochs=1)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                model(inputs_dict)

450
451
452
453
454
455
456
457
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")

        for model_class in self.all_model_classes:
458
459
460
461
462
            if self.is_encoder_decoder:
                input_ids = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"
                    ),
463
                    "input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
464
465
466
467
468
469
                }
            elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
                input_ids = tf.keras.Input(batch_shape=(4, 2, 2000), name="input_ids", dtype="int32")
            else:
                input_ids = tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32")

470
471
            # Prepare our model
            model = model_class(config)
472
            model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
473
            # Let's load it from the disk to be sure we can use pretrained weights
474
            with tempfile.TemporaryDirectory() as tmpdirname:
475
476
477
478
479
480
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)

            outputs_dict = model(input_ids)
            hidden_states = outputs_dict[0]

481
            # Add a dense layer on top to test integration with other keras modules
482
483
484
485
486
487
488
489
490
491
492
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)

            # Compile extended model
            extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
493
494
495
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs)
496

497
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
498
            input_ids = inputs_keywords.pop("input_ids", None)
499
500
501
502
503
504
505
506
507
            outputs_keywords = model(input_ids, **inputs_keywords)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

508
509
510
511
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
512
513

        for model_class in self.all_model_classes:
514
            inputs_dict["output_attentions"] = True
515
            inputs_dict["use_cache"] = False
516
517
            config.output_hidden_states = False
            model = model_class(config)
518
519
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs)
520
521
522
523
524
525
            attentions = [t.numpy() for t in outputs[-1]]
            self.assertEqual(model.config.output_hidden_states, False)
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
526
            )
527
            out_len = len(outputs)
thomwolf's avatar
thomwolf committed
528

529
530
531
            if self.is_encoder_decoder:
                self.assertEqual(out_len % 2, 0)
                decoder_attentions = outputs[(out_len // 2) - 1]
532
                self.assertEqual(model.config.output_hidden_states, False)
533
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
534
                self.assertListEqual(
535
536
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
537
                )
thomwolf's avatar
thomwolf committed
538

539
540
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
541
            config.output_attentions = True
542
            model = model_class(config)
543
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
544
545
546
547
548
549
550
551
552
553
            attentions = [t.numpy() for t in outputs[-1]]
            self.assertEqual(model.config.output_hidden_states, False)
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
554
555
            config.output_hidden_states = True
            model = model_class(config)
556
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
557
558
559
560
561
562
563
564
565
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)

            attentions = [t.numpy() for t in outputs[-1]]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
566

567
568
569
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Joseph Liu's avatar
Joseph Liu committed
570
        def check_hidden_states_output(config, inputs_dict, model_class):
571
            model = model_class(config)
572
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
573
            hidden_states = [t.numpy() for t in outputs[-1]]
574
575
576
577
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
578
            self.assertListEqual(
Lysandre's avatar
Lysandre committed
579
580
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.seq_length, self.model_tester.hidden_size],
581
            )
582

Joseph Liu's avatar
Joseph Liu committed
583
584
585
586
587
588
589
590
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)

591
592
593
594
595
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
596
            assert isinstance(model.get_input_embeddings(), (tf.keras.layers.Layer, TFAdaptiveEmbedding))
597
598
599
600
601
602
603
604
            x = model.get_output_embeddings()
            assert x is None or isinstance(x, tf.keras.layers.Layer)

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
605
            first, second = (
606
607
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
                model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
608
            )
609
610
611
612
613
614
615
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
    def test_model_outputs_equivalence(self):

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
                    )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

671
672
673
674
675
676
677
    def _get_embeds(self, wte, input_ids):
        # ^^ In our TF models, the input_embeddings can take slightly different forms,
        # so we try a few of them.
        # We used to fall back to just synthetically creating a dummy tensor of ones:
        try:
            x = wte(input_ids, mode="embedding")
        except Exception:
thomwolf's avatar
thomwolf committed
678
            try:
679
                x = wte([input_ids], mode="embedding")
680
            except Exception:
thomwolf's avatar
thomwolf committed
681
                try:
682
                    x = wte([input_ids, None, None, None], mode="embedding")
683
                except Exception:
684
                    if hasattr(self.model_tester, "embedding_size"):
Lysandre's avatar
Lysandre committed
685
686
687
688
                        x = tf.ones(
                            input_ids.shape + [self.model_tester.embedding_size],
                            dtype=tf.dtypes.float32,
                        )
689
                    else:
Lysandre's avatar
Lysandre committed
690
691
692
693
                        x = tf.ones(
                            input_ids.shape + [self.model_tester.hidden_size],
                            dtype=tf.dtypes.float32,
                        )
694
695
696
697
698
699
700
701
        return x

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

702
703
704
705
706
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
707
                encoder_input_ids = inputs["input_ids"]
708
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
709
                del inputs["input_ids"]
710
711
                inputs.pop("decoder_input_ids", None)

712
            wte = model.get_input_embeddings()
thomwolf's avatar
thomwolf committed
713
            if not self.is_encoder_decoder:
714
                inputs["inputs_embeds"] = self._get_embeds(wte, input_ids)
thomwolf's avatar
thomwolf committed
715
            else:
716
717
                inputs["inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
                inputs["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)
718

719
            model(inputs)
720

721
722
723
724
725
726
727
728
729
730
731
732
733
    def test_resize_token_embeddings(self):
        if not self.test_resize_embeddings:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        INPUT_SHAPE = [1, 10, config.hidden_size]
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=config)
                emb_old = model.get_input_embeddings()
                emb_old.build(INPUT_SHAPE)
                # reshape the embeddings
                new_embeddings = model._get_resized_embeddings(emb_old, size)
Julien Chaumond's avatar
Julien Chaumond committed
734
                # # check that the resized embeddings size matches the desired size.
735
736
737
738
739
740
741
742
743
744
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                emd_old_weights = model._get_word_embeddings(emb_old)
                models_equal = True
                for p1, p2 in zip(emd_old_weights.numpy(), new_embeddings.numpy()):
                    if np.sum(abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

745
    def test_lm_head_model_random_no_beam_search_generate(self):
746
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
747
        input_ids = inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]
748

749
        # iterate over all generative models
750
751
752
753
        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
754
                # if bos token id is not defined mobel needs input_ids
755
                with self.assertRaises(AssertionError):
756
                    model.generate(do_sample=True, max_length=5)
757
                # num_return_sequences = 1
758
                self._check_generated_ids(model.generate(input_ids, do_sample=True))
759
            else:
760
                # num_return_sequences = 1
761
                self._check_generated_ids(model.generate(do_sample=True, max_length=5))
762
763

            with self.assertRaises(AssertionError):
764
                # generating multiple sequences when no beam search generation
765
766
767
                # is not allowed as it would always generate the same sequences
                model.generate(input_ids, do_sample=False, num_return_sequences=2)

768
769
            # num_return_sequences > 1, sample
            self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
770
771

            # check bad words tokens language generation
772
773
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
774
            output_tokens = model.generate(
775
                input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
776
            )
777
            # only count generated tokens
778
779
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
780

781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
    def test_lm_head_model_random_beam_search_generate(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            if config.bos_token_id is None:
                # if bos token id is not defined mobel needs input_ids, num_return_sequences = 1
                self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
            else:
                # num_return_sequences = 1
                self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))

            with self.assertRaises(AssertionError):
                # generating more sequences than having beams leads is not possible
                model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)

            # num_return_sequences > 1, sample
Lysandre's avatar
Lysandre committed
800
801
802
803
804
805
806
807
            self._check_generated_ids(
                model.generate(
                    input_ids,
                    do_sample=True,
                    num_beams=2,
                    num_return_sequences=2,
                )
            )
808
809
810
811
812
813
            # num_return_sequences > 1, greedy
            self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))

            # check bad words tokens language generation
            # create list of 1-seq bad token and list of 2-seq of bad tokens
            bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
814
            output_tokens = model.generate(
815
                input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
816
            )
817
            # only count generated tokens
818
819
820
            generated_ids = output_tokens[:, input_ids.shape[-1] :]
            self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))

821
822
823
824
825
826
827
828
829
830
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[list(prepared_for_class.keys() - inputs_dict.keys())[0]]
                loss_size = tf.size(added_label)

831
832
833
834
835
                if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():
                    # if loss is causal lm loss, labels are shift, so that one label per batch
                    # is cut
                    loss_size = loss_size - self.model_tester.batch_size

836
837
838
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
839

840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertEqual(loss.shape, [loss_size])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.getfullargspec(model.call)[0]

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {1: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with None, update the values and convert to a tuple
                list_input = [None] * sorted_tuple_index_mapping[-1][0]
                for index, value in sorted_tuple_index_mapping:
                    list_input[index - 1] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input)[0]
                self.assertEqual(loss.shape, [loss_size])

872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
    def _generate_random_bad_tokens(self, num_bad_tokens, model):
        # special tokens cannot be bad tokens
        special_tokens = []
        if model.config.bos_token_id is not None:
            special_tokens.append(model.config.bos_token_id)
        if model.config.pad_token_id is not None:
            special_tokens.append(model.config.pad_token_id)
        if model.config.eos_token_id is not None:
            special_tokens.append(model.config.eos_token_id)

        # create random bad tokens that are not special tokens
        bad_tokens = []
        while len(bad_tokens) < num_bad_tokens:
            token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

890
    def _check_generated_ids(self, output_ids):
891
892
893
894
        for token_id in output_ids[0].numpy().tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

895
896
897
898
899
900
901
902
903
904
905
906
    def _check_match_tokens(self, generated_ids, bad_words_ids):
        # for all bad word tokens
        for bad_word_ids in bad_words_ids:
            # for all slices in batch
            for generated_ids_slice in generated_ids:
                # for all word idx
                for i in range(len(bad_word_ids), len(generated_ids_slice)):
                    # if tokens match
                    if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
                        return True
        return False

thomwolf's avatar
thomwolf committed
907

thomwolf's avatar
thomwolf committed
908
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
thomwolf's avatar
thomwolf committed
909
910
911
912
913
914
915
916
917
918
919
920
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

921
    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
thomwolf's avatar
thomwolf committed
922
923

    return output
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001


@require_tf
class UtilsFunctionsTest(unittest.TestCase):

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
Lysandre's avatar
Lysandre committed
1002
1003
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
Lysandre's avatar
Lysandre committed
1015
1016
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
1017
1018
1019
1020
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)