# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor


if is_torch_available():
    import torch
    from transformers import (
        LongformerConfig,
        LongformerModel,
        LongformerForMaskedLM,
        LongformerForSequenceClassification,
        LongformerForTokenClassification,
        LongformerForQuestionAnswering,
        LongformerForMultipleChoice,
    )


class LongformerModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
        self.attention_window = 4

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window + 1` locations
        self.key_length = self.attention_window + 1

        # Because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
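        # For illustration: with seq_length=7 and attention_window=4, the padding term is
        # (4 - 7 % 4) % 4 = 1, so encoder_seq_length becomes 8, the next multiple of the window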

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LongformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            attention_window=self.attention_window,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def check_loss_output(self, result):
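        # the loss should be a 0-dim (scalar) tensor, i.e. its size is the empty list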
        self.parent.assertListEqual(list(result["loss"].size()), [])

    def create_and_check_attention_mask_determinism(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_without_mask = model(input_ids)["last_hidden_state"]
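        # without an attention mask the model should attend everywhere by default,
        # matching the output obtained with an explicit all-ones mask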
        self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4))

    def create_and_check_longformer_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertListEqual(
            list(result["last_hidden_state"].size()), [self.batch_size, self.seq_length, self.hidden_size]
        )
        self.parent.assertListEqual(list(result["pooler_output"].size()), [self.batch_size, self.hidden_size])

    def create_and_check_longformer_model_with_global_attention_mask(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
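        # in a global attention mask, 1 marks tokens that attend globally and 0 marks tokens
        # restricted to local (sliding-window) attention; the middle position of each sequence
        # is forced to local attention here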
        global_attention_mask = input_mask.clone()
        global_attention_mask[:, input_mask.shape[-1] // 2] = 0
        global_attention_mask = global_attention_mask.to(torch_device)

        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
        )
        result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask)
        result = model(input_ids, global_attention_mask=global_attention_mask)

        self.parent.assertListEqual(
            list(result["last_hidden_state"].size()), [self.batch_size, self.seq_length, self.hidden_size]
        )
        self.parent.assertListEqual(list(result["pooler_output"].size()), [self.batch_size, self.hidden_size])

    def create_and_check_longformer_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.vocab_size])
        self.check_loss_output(result)

    def create_and_check_longformer_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
        self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
        self.check_loss_output(result)

    def create_and_check_longformer_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = LongformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_longformer_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = LongformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_longformer_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = LongformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
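        # every input is repeated along a new choice dimension so each tensor has shape
        # [batch_size, num_choices, seq_length]; the multiple-choice head flattens it internally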
        result = model(
            multiple_choice_input_ids,
            attention_mask=multiple_choice_input_mask,
            global_attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
        self.check_loss_output(result)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
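        # the shared `ModelTesterMixin` tests run with global attention disabled everywhere
        # (an all-zeros mask); tests that exercise global attention build their own mask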
        global_attention_mask = torch.zeros_like(input_ids)
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
            "global_attention_mask": global_attention_mask,
        }
        return config, inputs_dict

    def prepare_config_and_inputs_for_question_answering(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        # Replace any sep_token_id occurrences with a random token id
        input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item()
        # Make sure there are exactly three sep_token_id tokens at the end of the sequence
        input_ids[:, -3:] = config.sep_token_id
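        # (LongformerForQuestionAnswering derives its global attention mask from the separator
        # positions; a RoBERTa-style QA input `<s> question </s></s> context </s>` contains
        # exactly three sep tokens)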
        input_mask = torch.ones_like(input_ids)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels


@require_torch
class LongformerModelTest(ModelTesterMixin, unittest.TestCase):
    test_pruning = False  # pruning is not supported
    test_headmasking = False  # head masking is not supported
    test_torchscript = False

    all_model_classes = (
        (
            LongformerModel,
            LongformerForMaskedLM,
            LongformerForSequenceClassification,
            LongformerForQuestionAnswering,
            LongformerForTokenClassification,
            LongformerForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )

    def setUp(self):
        self.model_tester = LongformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_longformer_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_model(*config_and_inputs)

    def test_longformer_model_attention_mask_determinism(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs)

    def test_longformer_model_global_attention_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_model_with_global_attention_mask(*config_and_inputs)

    def test_longformer_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_masked_lm(*config_and_inputs)

    def test_longformer_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering()
        self.model_tester.create_and_check_longformer_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_multiple_choice(*config_and_inputs)


class LongformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world!'
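        # Longformer reuses the RoBERTa vocabulary; 0 and 2 below are the <s> and </s> special tokens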
        input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        output = model(input_ids, attention_mask=attention_mask)[0]
        output_without_mask = model(input_ids)[0]

        expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device)
        self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4))
        self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4))

    @slow
    def test_inference_no_head_long(self):
        model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world! ' repeated 1000 times
        input_ids = torch.tensor(
            [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
        )  # long input
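        # 4002 tokens in total: long enough to exercise the chunked local attention,
        # yet within the model's 4096 position limit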

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)
        global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device)
        global_attention_mask[:, [1, 4, 21]] = 1  # Set global attention on a few random positions

        output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0]

        expected_output_sum = torch.tensor(74585.8594, device=torch_device)
        expected_output_mean = torch.tensor(0.0243, device=torch_device)
        self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
        self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))

    @slow
    def test_inference_masked_lm_long(self):
        model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world! ' repeated 1000 times
        input_ids = torch.tensor(
            [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
        )  # long input

        loss, prediction_scores = model(input_ids, labels=input_ids)
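        # the labels are the unmasked inputs themselves, so a well-trained model should
        # reproduce them almost perfectly and the loss should be close to zero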

        expected_loss = torch.tensor(0.0074, device=torch_device)
        expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device)
        expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device)

        self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4))
        self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4))
        self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))