# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

from transformers.tokenization_distilbert import DistilBertTokenizer

from .test_tokenization_bert import BertTokenizationTest
from .utils import slow


class DistilBertTokenizationTest(BertTokenizationTest):
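    """DistilBertTokenizer is identical to BertTokenizer, so this class reuses
    the full BERT tokenization test suite through inheritance."""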

    tokenizer_class = DistilBertTokenizer

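    # Build a tokenizer from the toy vocabulary that the inherited setUp
    # writes to self.tmpdirname.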
    def get_tokenizer(self, **kwargs):
        return DistilBertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

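    # Downloads the pretrained vocabulary, so this test only runs when slow
    # tests are enabled.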
    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

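        # Encode without special tokens so the raw ids can be wrapped manually.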
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

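        # Let the tokenizer insert the model-specific special tokens itself.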
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

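        # Single sequence: [CLS] X [SEP]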
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
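        # Pair of sequences: [CLS] A [SEP] B [SEP]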
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]