"mmdet/datasets/utils.py" did not exist on "1189a8adcc55e3a2530ce8b716c55a67b2d53af1"
tokenization_xxx.py 12.6 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# coding=utf-8
# Copyright 2018 XXX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model XXX."""


import collections
import logging
import os
from typing import List, Optional

from .tokenization_utils import PreTrainedTokenizer


logger = logging.getLogger(__name__)

####################################################
# In this template, replace all the XXX (various casings) with your model name
####################################################

####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to file names for serializing Tokenizer instances
####################################################
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to pretrained vocabulary URL for all the model shortcut names.
####################################################
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xxx-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-vocab.txt",
        "xxx-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-vocab.txt",
    }
}

####################################################
# Mapping from model shortcut names to max length of inputs
####################################################
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xxx-base-uncased": 512,
    "xxx-large-uncased": 512,
}

####################################################
# Mapping from model shortcut names to a dictionary of additional
# keyword arguments for Tokenizer `__init__`.
# To be used for checkpoint specific configurations.
####################################################
PRETRAINED_INIT_CONFIGURATION = {
    "xxx-base-uncased": {"do_lower_case": True},
    "xxx-large-uncased": {"do_lower_case": True},
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
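
# A minimal sketch of the expected vocabulary format (illustrative, not part of the
# template): `load_vocab` reads one token per line and maps it to its line index.
# For a hypothetical vocab.txt containing
#     [PAD]
#     [UNK]
#     hello
# it would return OrderedDict([("[PAD]", 0), ("[UNK]", 1), ("hello", 2)]).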


class XxxTokenizer(PreTrainedTokenizer):
    r"""
    Constructs a XXX tokenizer. Based on XXX.

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
    should refer to the superclass for more information regarding methods.

    Args:
        vocab_file (:obj:`str`):
            File containing the vocabulary.
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to lowercase the input when tokenizing.
        do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to do basic tokenization before WordPiece.
        never_split (:obj:`Iterable`, `optional`, defaults to :obj:`None`):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            :obj:`do_basic_tokenize=True`.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering.
            It is also used as the last token of a sequence built with special tokens.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        **kwargs
    ):
        super().__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a pretrained "
                "model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        # Replace and adapt to your model's sub-tokenizers; `_tokenize` below expects
        # `self.basic_tokenizer` and `self.wordpiece_tokenizer` to be defined.
        # if do_basic_tokenize:
        #    self.basic_tokenizer = BasicTokenizer(
        #        do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars
        #    )
        # self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):

                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
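
    # Illustrative sketch (assumes BERT-style sub-tokenizers have been wired up in
    # `__init__`): the basic tokenizer first splits on whitespace/punctuation, then the
    # wordpiece tokenizer splits out-of-vocabulary words into "##"-prefixed subwords,
    # e.g. with a hypothetical vocabulary
    #     _tokenize("unaffordable prices") -> ["una", "##fford", "##able", "prices"]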

    def _convert_token_to_id(self, token):
        """ Converts a token (str) in an id using the vocab. """
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string
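
    # Illustrative sketch: joining strips the WordPiece continuation marker "##",
    # e.g. (hypothetical tokens)
    #     convert_tokens_to_string(["una", "##fford", "##able", "prices"]) -> "unaffordable prices"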

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens.
        An XXX sequence has the following format:

        - single sequence: ``[CLS] X [SEP]``
        - pair of sequences: ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
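
    # Illustrative sketch (hypothetical ids, assuming cls_token_id == 101 and
    # sep_token_id == 102):
    #     build_inputs_with_special_tokens([7, 8])          -> [101, 7, 8, 102]
    #     build_inputs_with_special_tokens([7, 8], [9, 10]) -> [101, 7, 8, 102, 9, 10, 102]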

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` method.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True if the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formated with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
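
    # Illustrative sketch (hypothetical ids): positions of the added [CLS]/[SEP]
    # tokens are marked 1, sequence tokens 0, e.g.
    #     get_special_tokens_mask([7, 8], [9, 10]) -> [1, 0, 0, 1, 0, 0, 1]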

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        An XXX sequence pair mask has the following format:

        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If :obj:`token_ids_1` is None, only the first portion of the mask (0s) is returned.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
            sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
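
    # Illustrative sketch (hypothetical ids): the first segment ([CLS] ... [SEP]) is
    # labelled 0 and the second segment (... [SEP]) is labelled 1, e.g.
    #     create_token_type_ids_from_sequences([7, 8], [9, 10]) -> [0, 0, 0, 0, 1, 1, 1]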

    def save_vocabulary(self, vocab_path):
        """
        Save the tokenizer's vocabulary to a directory or file (one token per line).

        Args:
            vocab_path (:obj:`str`):
                The directory (or file path) in which to save the vocabulary.

        Returns:
            :obj:`Tuple(str)`: Paths to the files saved.
        """
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = vocab_path
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(vocab_file)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
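

# Illustrative usage sketch (hypothetical shortcut name; assumes the sub-tokenizers
# above have been adapted for the new model):
#     tokenizer = XxxTokenizer.from_pretrained("xxx-base-uncased")
#     ids = tokenizer.encode("Hello, world!", add_special_tokens=True)
#     tokenizer.save_vocabulary("./my_vocab_dir")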