# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers import (
    DPRContextEncoderTokenizer,
    DPRContextEncoderTokenizerFast,
    DPRQuestionEncoderTokenizer,
    DPRQuestionEncoderTokenizerFast,
    DPRReaderOutput,
    DPRReaderTokenizer,
    DPRReaderTokenizerFast,
)
from transformers.testing_utils import require_tokenizers, slow
from transformers.tokenization_utils_base import BatchEncoding

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DPRContextEncoderTokenizationTest(BertTokenizationTest):
    tokenizer_class = DPRContextEncoderTokenizer
    rust_tokenizer_class = DPRContextEncoderTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base"


@require_tokenizers
class DPRQuestionEncoderTokenizationTest(BertTokenizationTest):
    tokenizer_class = DPRQuestionEncoderTokenizer
    rust_tokenizer_class = DPRQuestionEncoderTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_id = "facebook/dpr-question_encoder-single-nq-base"


@require_tokenizers
class DPRReaderTokenizationTest(BertTokenizationTest):
    tokenizer_class = DPRReaderTokenizer
    rust_tokenizer_class = DPRReaderTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_id = "facebook/dpr-reader-single-nq-base"

    @slow
    def test_decode_best_spans(self):
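        # Exercises span decoding on a toy reader batch: the input is laid out as
        # [CLS] question [SEP] title [SEP] passage, every logit starts at zero, and
        # a single span is spiked so decode_best_spans has to return exactly it.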
        tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased")

        text_1 = tokenizer.encode("question sequence", add_special_tokens=False)
        text_2 = tokenizer.encode("title sequence", add_special_tokens=False)
        text_3 = tokenizer.encode("text sequence " * 4, add_special_tokens=False)
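        # 101 and 102 are bert-base-uncased's [CLS] and [SEP] token ids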
        input_ids = [[101] + text_1 + [102] + text_2 + [102] + text_3]
        reader_input = BatchEncoding({"input_ids": input_ids})

        # Start from flat (all-zero) logits over the whole sequence
        start_logits = [[0] * len(input_ids[0])]
        end_logits = [[0] * len(input_ids[0])]
        relevance_logits = [0]
        reader_output = DPRReaderOutput(start_logits, end_logits, relevance_logits)

        # Spike a single span inside the passage so it is unambiguously the best one
        start_index, end_index = 8, 9
        start_logits[0][start_index] = 10
        end_logits[0][end_index] = 10
        predicted_spans = tokenizer.decode_best_spans(reader_input, reader_output)
        self.assertEqual(predicted_spans[0].start_index, start_index)
        self.assertEqual(predicted_spans[0].end_index, end_index)
        self.assertEqual(predicted_spans[0].doc_id, 0)

    @slow
    def test_call(self):
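        # The reader tokenizer's __call__ packs parallel lists of questions, titles
        # and texts into one sequence per triple: [CLS] question [SEP] title [SEP]
        # passage, with no trailing [SEP], as checked against expected_input_ids below.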
        tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased")

        text_1 = tokenizer.encode("question sequence", add_special_tokens=False)
        text_2 = tokenizer.encode("title sequence", add_special_tokens=False)
        text_3 = tokenizer.encode("text sequence", add_special_tokens=False)
        expected_input_ids = [101] + text_1 + [102] + text_2 + [102] + text_3
        encoded_input = tokenizer(questions=["question sequence"], titles=["title sequence"], texts=["text sequence"])
        self.assertIn("input_ids", encoded_input)
        self.assertIn("attention_mask", encoded_input)
        self.assertListEqual(encoded_input["input_ids"][0], expected_input_ids)