# coding=utf-8
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest
import shutil

from transformers import is_torch_available

from .modeling_common_test import (CommonTestCases, ids_tensor)
from .configuration_common_test import ConfigTester
from .utils import require_torch, slow, torch_device

if is_torch_available():
    from transformers import (T5Config, T5Model, T5WithLMHeadModel)
    from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_MAP


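# Common model tests plus T5-specific shape checks for the PyTorch T5 model
# and its LM-head variant.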
@require_torch
class T5ModelTest(CommonTestCases.CommonModelTester):

    all_model_classes = (T5Model, T5WithLMHeadModel) if is_torch_available() else ()
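    # Head pruning, torchscript export and embedding resizing are not
    # exercised for T5 here, so the corresponding common tests are disabled.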
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    is_encoder_decoder = True

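    # Helper that builds a deliberately tiny random T5 config and matching
    # encoder/decoder inputs so the checks below stay fast.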
    class T5ModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     encoder_seq_length=7,
                     decoder_seq_length=9,
                     is_training=True,
                     use_attention_mask=True,
                     use_labels=True,
                     vocab_size=99,
                     n_positions=14,
                     hidden_size=32,
                     num_hidden_layers=5,
                     num_attention_heads=4,
                     d_ff=37,
                     relative_attention_num_buckets=8,
                     dropout_rate=0.1,
                     initializer_factor=0.002,
                     scope=None,
                    ):
            self.parent = parent
            self.batch_size = batch_size
            self.encoder_seq_length = encoder_seq_length
            self.decoder_seq_length = decoder_seq_length
            self.is_training = is_training
            self.use_attention_mask = use_attention_mask
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.n_positions = n_positions
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.d_ff = d_ff
            self.relative_attention_num_buckets = relative_attention_num_buckets
            self.dropout_rate = dropout_rate
            self.initializer_factor = initializer_factor
            self.scope = scope

        def prepare_config_and_inputs(self):
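            # Random token ids for the encoder and decoder inputs.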
            encoder_input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
            decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

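            # Optional random 0/1 attention masks (ids_tensor with vocab_size=2
            # draws values from {0, 1}).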
            encoder_attention_mask = None
            decoder_attention_mask = None
            if self.use_attention_mask:
                encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
                decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

            decoder_lm_labels = None
            if self.use_labels:
                decoder_lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

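            # A small configuration; d_kv is chosen so that
            # num_heads * d_kv == d_model.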
            config = T5Config(
                vocab_size=self.vocab_size,
                n_positions=self.n_positions,
                d_model=self.hidden_size,
                d_ff=self.d_ff,
                d_kv=self.hidden_size // self.num_attention_heads,
                num_layers=self.num_hidden_layers,
                num_heads=self.num_attention_heads,
                relative_attention_num_buckets=self.relative_attention_num_buckets,
                dropout_rate=self.dropout_rate,
                initializer_factor=self.initializer_factor)

            return (config, encoder_input_ids, decoder_input_ids, encoder_attention_mask, decoder_attention_mask, decoder_lm_labels)

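        # The loss must be a scalar, i.e. a zero-dimensional tensor.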
        def check_loss_output(self, result):
            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])

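        # Runs the model with and without attention masks and checks that the
        # encoder and decoder outputs have the expected shapes.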
        def create_and_check_t5_model(self, config, encoder_input_ids, decoder_input_ids, encoder_attention_mask, decoder_attention_mask, decoder_lm_labels):
            model = T5Model(config=config)
            model.to(torch_device)  # inputs from ids_tensor live on torch_device
            model.eval()
            decoder_output, encoder_output = model(encoder_input_ids=encoder_input_ids,
                                                   decoder_input_ids=decoder_input_ids,
                                                   encoder_attention_mask=encoder_attention_mask,
                                                   decoder_attention_mask=decoder_attention_mask)
            decoder_output, encoder_output = model(encoder_input_ids=encoder_input_ids,
                                                   decoder_input_ids=decoder_input_ids)

            result = {
                "encoder_output": encoder_output,
                "decoder_output": decoder_output,
            }
            self.parent.assertListEqual(
                list(result["encoder_output"].size()),
                [self.batch_size, self.encoder_seq_length, self.hidden_size])
            self.parent.assertListEqual(
                list(result["decoder_output"].size()),
                [self.batch_size, self.decoder_seq_length, self.hidden_size])


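        # Checks that the LM-head variant returns a scalar loss and per-token
        # prediction scores over the vocabulary.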
        def create_and_check_t5_with_lm_head(self, config, encoder_input_ids, decoder_input_ids, encoder_attention_mask, decoder_attention_mask, decoder_lm_labels):
            model = T5WithLMHeadModel(config=config)
            model.to(torch_device)  # inputs from ids_tensor live on torch_device
            model.eval()
            outputs = model(encoder_input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids,
                            decoder_attention_mask=decoder_attention_mask, decoder_lm_labels=decoder_lm_labels)
            loss, prediction_scores = outputs[0], outputs[1]
            result = {
                "loss": loss,
                "prediction_scores": prediction_scores,
            }
            self.parent.assertListEqual(
                list(result["prediction_scores"].size()),
                [self.batch_size, self.decoder_seq_length, self.vocab_size])
            self.check_loss_output(result)

        def prepare_config_and_inputs_for_common(self):
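            # Repackage the inputs as the kwargs dict expected by the common tests.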
            config_and_inputs = self.prepare_config_and_inputs()
            (config, encoder_input_ids, decoder_input_ids, encoder_attention_mask,
             decoder_attention_mask, decoder_lm_labels) = config_and_inputs
            inputs_dict = {'encoder_input_ids': encoder_input_ids,
                           'decoder_input_ids': decoder_input_ids,
                           'decoder_attention_mask': decoder_attention_mask,
                           'encoder_attention_mask': encoder_attention_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = T5ModelTest.T5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_t5_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_model(*config_and_inputs)

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)

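    # Marked slow because it downloads a real pretrained checkpoint.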
    @slow
    def test_model_from_pretrained(self):
        cache_dir = "/tmp/transformers_test/"
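        # Only the first entry of the archive map is exercised to keep the
        # download small.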
        for model_name in list(T5_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = T5Model.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)

if __name__ == "__main__":
    unittest.main()