"src/vscode:/vscode.git/clone" did not exist on "e700552f93763973854a59ca086963642ec29c79"
test_tokenization_bart.py 5.71 KB
Newer Older
import json
import os
import unittest

from transformers import BartTokenizer, BartTokenizerFast, BatchEncoding
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
from transformers.tokenization_roberta import VOCAB_FILES_NAMES

from .test_tokenization_common import TokenizerTesterMixin


class TestTokenizationBart(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartTokenizer

    def setUp(self):
        super().setUp()
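        # Minimal byte-level BPE vocabulary; "\u0120" is the GPT-2/RoBERTa space marker ("Ġ")
        # that prefixes word-initial tokens.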
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
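        # BPE merge rules in the standard merges-file format (version header first).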
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

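    # Full pretrained BART tokenizers (slow and fast) used by the seq2seq batch tests below.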
    @cached_property
    def default_tokenizer(self):
        return BartTokenizer.from_pretrained("facebook/bart-large")

    @cached_property
    def default_tokenizer_fast(self):
        return BartTokenizerFast.from_pretrained("facebook/bart-large")

    @require_torch
    def test_prepare_seq2seq_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
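        # Expected ids for the first source sentence: 0 is <s> (bos), 2 is </s> (eos).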
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(
                src_text, tgt_texts=tgt_text, max_length=len(expected_src_tokens), return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    # prepare_seq2seq_batch without target texts
    @require_torch
    def test_seq2seq_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(src_text, return_tensors="pt")
            # check that input_ids and attention_mask are returned and that there are no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_seq2seq_batch_max_target_length(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
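        # padding="max_length" pads the target side ("labels") out to max_target_length.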
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(
                src_text, tgt_texts=tgt_text, max_target_length=32, padding="max_length", return_tensors="pt"
            )
            self.assertEqual(32, batch["labels"].shape[1])

            # when max_target_length is None, the target side falls back to max_length
            batch = tokenizer.prepare_seq2seq_batch(
                src_text, tgt_texts=tgt_text, max_length=32, padding="max_length", return_tensors="pt"
            )
            self.assertEqual(32, batch["labels"].shape[1])

    @require_torch
    def test_seq2seq_batch_not_longer_than_maxlen(self):
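        # The first source string repeats well past the model limit, so the encoded batch
        # should be truncated to the tokenizer's model_max_length (1024 for facebook/bart-large).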
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(
                ["I am a small frog" * 1024, "I am a small frog"], return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
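        # Both the encoder input_ids and the labels should start with <s> (bos) and end with </s> (eos).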
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, return_tensors="pt")
            input_ids = batch["input_ids"]
            labels = batch["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())