# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import is_torch_available

from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device


if is_torch_available():
    from transformers import (
        GPT2Config,
        GPT2Model,
        GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
        GPT2LMHeadModel,
        GPT2DoubleHeadsModel,
    )


@require_torch
class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):

    all_model_classes = (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()

    class GPT2ModelTester(object):
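        # Helper that builds a small GPT2Config and matching random input tensors
        # for the shared ModelTesterMixin checks.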
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_token_type_ids=True,
            use_input_mask=True,
            use_labels=True,
            use_mc_token_ids=True,
            vocab_size=99,
            hidden_size=32,
            num_hidden_layers=5,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_token_type_ids = use_token_type_ids
            self.use_input_mask = use_input_mask
            self.use_labels = use_labels
            self.use_mc_token_ids = use_mc_token_ids
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope

        def prepare_config_and_inputs(self):
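            # ids_tensor (from test_modeling_common) returns a random integer tensor
            # of the given shape with values in [0, vocab_size).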
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

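            # With vocab_size=2 this yields a random 0/1 mask standing in for an attention mask.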
            input_mask = None
            if self.use_input_mask:
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            mc_token_ids = None
            if self.use_mc_token_ids:
                mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = GPT2Config(
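                # GPT2Config uses GPT-2 argument names (n_embd, n_layer, n_head, n_positions);
                # the BERT-style arguments below are intentionally left commented out.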
                vocab_size=self.vocab_size,
                n_embd=self.hidden_size,
                n_layer=self.num_hidden_layers,
                n_head=self.num_attention_heads,
                # intermediate_size=self.intermediate_size,
                # hidden_act=self.hidden_act,
                # hidden_dropout_prob=self.hidden_dropout_prob,
                # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                n_positions=self.max_position_embeddings,
                n_ctx=self.max_position_embeddings
                # type_vocab_size=self.type_vocab_size,
                # initializer_range=self.initializer_range
            )

            # Random binary mask over attention heads, shaped (num_hidden_layers, num_attention_heads).
            head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

            return (
                config,
                input_ids,
                input_mask,
                head_mask,
                token_type_ids,
                mc_token_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            )

        def check_loss_output(self, result):
            self.parent.assertListEqual(list(result["loss"].size()), [])

        def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
            model = GPT2Model(config=config)
            model.to(torch_device)
            model.eval()

            model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
            model(input_ids, token_type_ids=token_type_ids)
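            # The model also returns "presents", the cached key/value states (one entry per layer).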
            sequence_output, presents = model(input_ids)

            result = {
                "sequence_output": sequence_output,
                "presents": presents,
            }
            self.parent.assertListEqual(
                list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
            )
            self.parent.assertEqual(len(result["presents"]), config.n_layer)

        def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
            model = GPT2LMHeadModel(config)
            model.to(torch_device)
            model.eval()

            # Reusing input_ids as labels; the model shifts them internally for next-token prediction.
            loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)

            result = {"loss": loss, "lm_logits": lm_logits}

            self.parent.assertListEqual(list(result["loss"].size()), [])
            self.parent.assertListEqual(
                list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size]
            )

        def create_and_check_double_lm_head_model(
            self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
        ):
            model = GPT2DoubleHeadsModel(config)
            model.to(torch_device)
            model.eval()

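            # Tile every input to (batch_size, num_choices, seq_length) for the multiple-choice head.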
            multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()

            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "mc_token_ids": mc_token_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
                "lm_labels": multiple_choice_inputs_ids,
            }

            loss, lm_logits, mc_logits, _ = model(**inputs)

            result = {"loss": loss, "lm_logits": lm_logits, "mc_logits": mc_logits}

            self.parent.assertListEqual(list(result["loss"].size()), [])
            self.parent.assertListEqual(
                list(result["lm_logits"].size()), [self.batch_size, self.num_choices, self.seq_length, self.vocab_size]
            )
            self.parent.assertListEqual(list(result["mc_logits"].size()), [self.batch_size, self.num_choices])

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()

            (
                config,
                input_ids,
                input_mask,
                head_mask,
                token_type_ids,
                mc_token_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs

            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

            return config, inputs_dict

    def setUp(self):
        self.model_tester = GPT2ModelTest.GPT2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_gpt2_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model(*config_and_inputs)

    def test_gpt2_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_gpt2_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

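    # Downloads real pretrained weights, so this runs only when slow tests are enabled.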
    @slow
    def test_model_from_pretrained(self):
        for model_name in list(GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = GPT2Model.from_pretrained(model_name, cache_dir=CACHE_DIR)
            self.assertIsNotNone(model)