# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import is_torch_available

from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device


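# torch and the GPT-2 model classes are imported lazily so that this module can
# still be collected when PyTorch is not installed.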
if is_torch_available():
    import torch
    from transformers import (
        GPT2Config,
        GPT2Model,
        GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
        GPT2LMHeadModel,
        GPT2DoubleHeadsModel,
    )


@require_torch
class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
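    """Runs the shared ModelTesterMixin checks plus GPT-2-specific shape tests."""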

    all_model_classes = (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()
    all_generative_model_classes = (
        (GPT2LMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): add GPT2DoubleHeadsModel when the generate() function supports it

    class GPT2ModelTester(object):
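        """Builds a small random config and matching inputs so every check runs quickly on CPU."""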
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_token_type_ids=True,
            use_input_mask=True,
            use_labels=True,
            use_mc_token_ids=True,
            vocab_size=99,
            hidden_size=32,
            num_hidden_layers=5,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_token_type_ids = use_token_type_ids
            self.use_input_mask = use_input_mask
            self.use_labels = use_labels
            self.use_mc_token_ids = use_mc_token_ids
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
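            # The last vocabulary id stands in for both BOS and EOS in these tests.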
            self.bos_token_id = vocab_size - 1
            self.eos_token_id = vocab_size - 1

        def prepare_config_and_inputs(self):
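            # Random inputs sized by the hyperparameters above: token ids, masks and labels.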
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            mc_token_ids = None
            if self.use_mc_token_ids:
                mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = GPT2Config(
                vocab_size=self.vocab_size,
                n_embd=self.hidden_size,
                n_layer=self.num_hidden_layers,
                n_head=self.num_attention_heads,
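                # The BERT-style kwargs below have no GPT2Config equivalent and stay commented out.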
                # intermediate_size=self.intermediate_size,
                # hidden_act=self.hidden_act,
                # hidden_dropout_prob=self.hidden_dropout_prob,
                # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                n_positions=self.max_position_embeddings,
                n_ctx=self.max_position_embeddings,
                # type_vocab_size=self.type_vocab_size,
                # initializer_range=self.initializer_range
                bos_token_id=self.bos_token_id,
                eos_token_ids=self.eos_token_id,
            )

            # Random binary mask over (num_layers, num_heads), exercising the head_mask argument.
            head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

            return (
                config,
                input_ids,
                input_mask,
                head_mask,
                token_type_ids,
                mc_token_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            )

        def check_loss_output(self, result):
            self.parent.assertListEqual(list(result["loss"].size()), [])

        def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
            model = GPT2Model(config=config)
            model.to(torch_device)
            model.eval()

            # The forward pass accepts optional token_type_ids and head_mask; the bare call
            # returns the hidden states plus the cached key/value pairs ("presents").
            model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
            model(input_ids, token_type_ids=token_type_ids)
            sequence_output, presents = model(input_ids)

            result = {
                "sequence_output": sequence_output,
                "presents": presents,
            }
            self.parent.assertListEqual(
                list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size],
            )
            self.parent.assertEqual(len(result["presents"]), config.n_layer)

        def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
            model = GPT2LMHeadModel(config)
            model.to(torch_device)
            model.eval()

            # For a causal LM the labels are just the input ids; the model shifts them
            # internally before computing the cross-entropy loss.
            loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)

            result = {"loss": loss, "lm_logits": lm_logits}

            self.parent.assertListEqual(list(result["loss"].size()), [])
            self.parent.assertListEqual(
                list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size],
            )

        def create_and_check_double_lm_head_model(
            self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
        ):
            model = GPT2DoubleHeadsModel(config)
            model.to(torch_device)
            model.eval()

            # Tile every tensor along a new num_choices axis so each choice sees the same
            # tokens; the expanded views are made contiguous before the forward pass.
            multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
            multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()

            inputs = {
                "input_ids": multiple_choice_input_ids,
                "mc_token_ids": mc_token_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
                "lm_labels": multiple_choice_input_ids,
            }

            loss, lm_logits, mc_logits, _ = model(**inputs)

            result = {"loss": loss, "lm_logits": lm_logits, "mc_logits": mc_logits}

            self.parent.assertListEqual(list(result["loss"].size()), [])
            self.parent.assertListEqual(
                list(result["lm_logits"].size()),
                [self.batch_size, self.num_choices, self.seq_length, self.vocab_size],
            )
            self.parent.assertListEqual(list(result["mc_logits"].size()), [self.batch_size, self.num_choices])

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()

            (
                config,
                input_ids,
                input_mask,
                head_mask,
                token_type_ids,
                mc_token_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs

            inputs_dict = {
                "input_ids": input_ids,
                "token_type_ids": token_type_ids,
                "head_mask": head_mask,
            }

            return config, inputs_dict

    def setUp(self):
        self.model_tester = GPT2ModelTest.GPT2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_gpt2_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model(*config_and_inputs)

    def test_gpt2_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_gpt2_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
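        # Only the first entry of the archive map is checked, to keep this slow test bounded.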
        for model_name in list(GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = GPT2Model.from_pretrained(model_name, cache_dir=CACHE_DIR)
            self.assertIsNotNone(model)


def prepare_generation_special_tokens():
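    # 50256 is GPT-2's <|endoftext|> token, which serves as both BOS and EOS.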
    return {"bos_token_id": 50256, "eos_token_id": 50256}


class GPT2ModelLanguageGenerationTest(unittest.TestCase):
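    """End-to-end checks that generate() reproduces pinned continuations from the released checkpoints."""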

    special_tokens = prepare_generation_special_tokens()

    @slow
    def test_lm_generate_gpt2(self):
        model = GPT2LMHeadModel.from_pretrained("gpt2")
        input_ids = torch.tensor([[464, 3290, 318, 13779]], dtype=torch.long)  # The dog is cute
        expected_output_ids = [
            464,
            3290,
            318,
            13779,
            1165,
            13,
            632,
            7832,
            284,
            6437,
            319,
            502,
            290,
            318,
            922,
            329,
            502,
            357,
            1169,
            3290,
        ]  # The dog is cute too. It likes to rub on me and is good for me (the dog
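        # Seed the RNG so the (sampled) continuation is reproducible and matches the ids above.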
        torch.manual_seed(0)

        output_ids = model.generate(
            input_ids,
            bos_token_id=self.special_tokens["bos_token_id"],
            eos_token_ids=self.special_tokens["eos_token_id"],
        )

        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)

    @slow
    def test_lm_generate_distilgpt2(self):
        model = GPT2LMHeadModel.from_pretrained("distilgpt2")
        input_ids = torch.tensor([[464, 3290, 318, 13779]], dtype=torch.long)  # The dog is cute
        expected_output_ids = [
            464,
            3290,
            318,
            13779,
            996,
            339,
            460,
            3360,
            655,
            2513,
            287,
            262,
            3952,
            13,
            632,
            318,
            407,
            845,
            3621,
            284,
        ]  # The dog is cute though he can sometimes just walk in the park. It is not very nice to
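        # Seed again so the distilgpt2 continuation is reproducible.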
        torch.manual_seed(0)

        output_ids = model.generate(
            input_ids,
            bos_token_id=self.special_tokens["bos_token_id"],
            eos_token_ids=self.special_tokens["eos_token_id"],
        )

        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)