test_tokenization_roformer.py
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ..test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
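    """Tests the slow (Python) and fast (Rust) RoFormer tokenizers.

    Both tokenizers are loaded from the "junnyu/roformer_chinese_base"
    checkpoint, so running these tests needs that checkpoint available
    (downloaded or cached) plus the rjieba and tokenizers packages.
    """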

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
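        # Reference fixture: a Chinese sentence and the segmentation expected with
        # this checkpoint's vocabulary. RoFormer pre-segments Chinese text into
        # words with rjieba before looking them up, which is why multi-character
        # words such as 永和 and 有限公司 appear as single tokens below.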
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
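        """The slow tokenizer should reproduce the reference segmentation and
        map the tokens (plus the unknown token) to the expected ids."""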
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)

        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
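        """The fast (Rust-backed) tokenizer should produce the same segmentation
        and ids as the slow tokenizer."""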
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # A new tokenizer cannot be trained through the Tokenizers library for RoFormer,
    # so this inherited common test is overridden with a no-op.
    def test_training_new_tokenizer(self):
        pass

    # Same reason as above: training a new tokenizer is not supported, so skip.
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass