"vscode:/vscode.git/clone" did not exist on "e11d923bfc61ed640bc7e696549578361126485e"
test_tokenization_openai.py 2.56 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers.tokenization_openai import VOCAB_FILES_NAMES, OpenAIGPTTokenizer, OpenAIGPTTokenizerFast

from .test_tokenization_common import TokenizerTesterMixin


class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = OpenAIGPTTokenizer
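    # with test_rust_tokenizer enabled below, the shared mixin tests also run against the Rust-backed fast tokenizer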
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
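        # merge rules, applied in order: "l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>"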
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
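        # input text and expected decoded text consumed by the shared TokenizerTesterMixin tests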
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
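        # the merges defined in setUp collapse "lower" into "low" + "er</w>"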
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
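        # ids follow the vocab order above: "low" -> 14, "er</w>" -> 15, "<unk>" -> 20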
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)