# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function

import copy
import os
import random
import shutil
import sys
import tempfile

from transformers import is_tf_available, is_torch_available

from .utils import require_tf


if is_tf_available():
    import tensorflow as tf
    import numpy as np

    # from transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP


if sys.version_info[0] == 2:

    class TemporaryDirectory(object):
        """Context manager for tempfile.mkdtemp() so it's usable with "with" statement."""
39

40
41
42
        def __enter__(self):
            self.name = tempfile.mkdtemp()
            return self.name
43

44
45
        def __exit__(self, exc_type, exc_value, traceback):
            shutil.rmtree(self.name)


else:
    TemporaryDirectory = tempfile.TemporaryDirectory
    unicode = str


def _config_zero_init(config):
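    # Copy the config and zero every "*_range"/"*_std" attribute so that weight
    # initialization becomes deterministic for initialization tests.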
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key:
            setattr(configs_no_init, key, 0.0)
    return configs_no_init


@require_tf
class TFModelTesterMixin:
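    # Shared test mixin for the TF model test classes. Concrete subclasses supply
    # `model_tester` and `all_model_classes`; the remaining attributes are
    # per-model switches for optional checks.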

    model_tester = None
    all_model_classes = ()
    test_torchscript = True
    test_pruning = True
    test_resize_embeddings = True
    is_encoder_decoder = False

    def test_initialization(self):
        pass
        # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # configs_no_init = _config_zero_init(config)
        # for model_class in self.all_model_classes:
        #     model = model_class(config=configs_no_init)
        #     for name, param in model.named_parameters():
        #         if param.requires_grad:
        #             self.assertIn(param.data.mean().item(), [0.0, 1.0],
        #             msg="Parameter {} of model {} seems not properly initialized".format(name, model_class))

    def test_save_load(self):
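        # Round-trip each model through save_pretrained/from_pretrained and check
        # that the reloaded model produces the same outputs.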
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(inputs_dict)

            with TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(inputs_dict)

                # Make sure we don't have nans
                out_1 = after_outputs[0].numpy()
                out_2 = outputs[0].numpy()
                out_1 = out_1[~np.isnan(out_1)]
                out_2 = out_2[~np.isnan(out_2)]
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_pt_tf_model_equivalence(self):
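        # Cross-check the TF model against its PyTorch counterpart: convert weights
        # both in memory and through checkpoint files, then compare first outputs.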
        if not is_torch_available():
            return

        import torch
        import transformers

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            config.output_hidden_states = True
            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=inputs_dict)
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)

            # Check predictions on the first output (logits/hidden-states) are close enough, given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = dict(
                (name, torch.from_numpy(key.numpy()).to(torch.long)) for name, key in inputs_dict.items()
            )
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(inputs_dict, training=False)
            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()
            tf_hidden_states[np.isnan(tf_hidden_states)] = 0
            pt_hidden_states[np.isnan(pt_hidden_states)] = 0
            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
            self.assertLessEqual(max_diff, 2e-2)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check predictions on the first output (logits/hidden-states) are close enough, given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = dict(
                (name, torch.from_numpy(key.numpy()).to(torch.long)) for name, key in inputs_dict.items()
            )
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(inputs_dict)
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
            tfo[np.isnan(tfo)] = 0
            pto[np.isnan(pto)] = 0
            max_diff = np.amax(np.abs(tfo - pto))
            self.assertLessEqual(max_diff, 2e-2)

    def test_compile_tf_model(self):
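        # Make sure each model can be wrapped in a plain Keras graph, extended
        # with extra layers, and compiled end-to-end without errors.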
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        if self.is_encoder_decoder:
            input_ids = {
                "decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
                "encoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="encoder_input_ids", dtype="int32"),
            }
        else:
            input_ids = tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32")
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)

            # Let's load it from the disk to be sure we can use pretrained weights
            with TemporaryDirectory() as tmpdirname:
                outputs = model(inputs_dict)  # build the model
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)

            outputs_dict = model(input_ids)
            hidden_states = outputs_dict[0]

            # Add a dense layer on top to test integration with other Keras modules
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)

            # Compile extended model
            extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
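        # Calling the model with a single inputs dict and with keyword arguments
        # must produce identical outputs.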
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs_dict = model(inputs_dict)

            inputs_keywords = copy.deepcopy(inputs_dict)
            input_ids = inputs_keywords.pop("input_ids" if not self.is_encoder_decoder else "decoder_input_ids", None)
            outputs_keywords = model(input_ids, **inputs_keywords)

            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_attention_outputs(self):
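        # With output_attentions=True, the last output must contain one attention
        # tensor per layer, each with the expected shape.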
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        decoder_seq_length = (
            self.model_tester.decoder_seq_length
            if hasattr(self.model_tester, "decoder_seq_length")
            else self.model_tester.seq_length
        )
        encoder_seq_length = (
            self.model_tester.encoder_seq_length
            if hasattr(self.model_tester, "encoder_seq_length")
            else self.model_tester.seq_length
        )
        decoder_key_length = (
            self.model_tester.key_length if hasattr(self.model_tester, "key_length") else decoder_seq_length
        )
        encoder_key_length = (
            self.model_tester.key_length if hasattr(self.model_tester, "key_length") else encoder_seq_length
        )

        for model_class in self.all_model_classes:
            config.output_attentions = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(inputs_dict)
            attentions = [t.numpy() for t in outputs[-1]]
            self.assertEqual(model.config.output_attentions, True)
            self.assertEqual(model.config.output_hidden_states, False)
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                self.assertEqual(out_len % 2, 0)
                decoder_attentions = outputs[(out_len // 2) - 1]
                self.assertEqual(model.config.output_attentions, True)
                self.assertEqual(model.config.output_hidden_states, False)
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

            # Check attention is always last and order is fine
            config.output_attentions = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(inputs_dict)
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_attentions, True)
            self.assertEqual(model.config.output_hidden_states, True)

            attentions = [t.numpy() for t in outputs[-1]]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

    def test_hidden_states_output(self):
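        # With output_hidden_states=True, expect num_hidden_layers + 1 hidden-state
        # tensors (the embeddings plus the output of each layer).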
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config.output_hidden_states = True
            config.output_attentions = False
            model = model_class(config)
            outputs = model(inputs_dict)
            hidden_states = [t.numpy() for t in outputs[-1]]
            self.assertEqual(model.config.output_attentions, False)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size]
            )

    def test_model_common_attributes(self):
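        # Every model must expose its input embeddings as a Keras layer; output
        # embeddings are optional and may be None.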
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            x = model.get_output_embeddings()
            assert x is None or isinstance(x, tf.keras.layers.Layer)

    def test_determinism(self):
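        # Two forward passes with training=False must produce identical outputs.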
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            first, second = model(inputs_dict, training=False)[0], model(inputs_dict, training=False)[0]
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def _get_embeds(self, wte, input_ids):
        # In our TF models, the input embeddings can take slightly different call
        # signatures, so we try a few of them in turn and, if none of them works,
        # fall back to a synthetic dummy tensor of ones.
        try:
            x = wte(input_ids, mode="embedding")
        except Exception:
            try:
                x = wte([input_ids], mode="embedding")
            except Exception:
                try:
                    x = wte([input_ids, None, None, None], mode="embedding")
                except Exception:
                    if hasattr(self.model_tester, "embedding_size"):
                        x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32)
                    else:
                        x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32)
        return x

    def test_inputs_embeds(self):
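        # Models must accept pre-computed embeddings (inputs_embeds) in place of
        # input_ids.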
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.is_encoder_decoder:
            input_ids = inputs_dict["input_ids"]
            del inputs_dict["input_ids"]
        else:
            encoder_input_ids = inputs_dict["encoder_input_ids"]
            decoder_input_ids = inputs_dict["decoder_input_ids"]
            del inputs_dict["encoder_input_ids"]
            del inputs_dict["decoder_input_ids"]

        for model_class in self.all_model_classes:
            model = model_class(config)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs_dict["inputs_embeds"] = self._get_embeds(wte, input_ids)
            else:
                inputs_dict["encoder_inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
                inputs_dict["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)

            outputs = model(inputs_dict)


def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)

    return output
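

# Usage sketch (illustrative names, assuming a TFBert test module): concrete test
# files mix TFModelTesterMixin into a unittest.TestCase and supply the
# model-specific pieces, e.g.
#
#     @require_tf
#     class TFBertModelTest(TFModelTesterMixin, unittest.TestCase):
#         all_model_classes = (TFBertModel,) if is_tf_available() else ()
#
#         def setUp(self):
#             self.model_tester = ...  # model-specific config/inputs builder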