# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import logging
import os
import json
import six
import copy
from io import open

from .file_utils import cached_path, is_tf_available

if is_tf_available():
    import tensorflow as tf

logger = logging.getLogger(__name__)

SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json'
ADDED_TOKENS_FILE = 'added_tokens.json'
TOKENIZER_CONFIG_FILE = 'tokenizer_config.json'

class PreTrainedTokenizer(object):
    """ Base class for all tokenizers.
    Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.

    This class also contains the added tokens in a unified way on top of all tokenizers, so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).

    Class attributes (overridden by derived classes):

        - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
        - ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
        - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
        - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the ``__init__`` method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method.

    Parameters:

        - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id``

        - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id``

        - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id``

        - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id``

        - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id``

        - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id``

        - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``

        - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensures they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
    """
    vocab_files_names = {}
    pretrained_vocab_files_map = {}
    pretrained_init_configuration = {}
    max_model_input_sizes = {}

    SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token",
                                 "pad_token", "cls_token", "mask_token",
                                 "additional_special_tokens"]

    @property
    def bos_token(self):
        """ Beginning of sentence token (string). Log an error if used while not having been set. """
        if self._bos_token is None:
            logger.error("Using bos_token, but it is not set yet.")
        return self._bos_token

    @property
    def eos_token(self):
        """ End of sentence token (string). Log an error if used while not having been set. """
        if self._eos_token is None:
            logger.error("Using eos_token, but it is not set yet.")
        return self._eos_token

    @property
    def unk_token(self):
        """ Unknown token (string). Log an error if used while not having been set. """
        if self._unk_token is None:
            logger.error("Using unk_token, but it is not set yet.")
        return self._unk_token

    @property
    def sep_token(self):
        """ Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        if self._sep_token is None:
            logger.error("Using sep_token, but it is not set yet.")
        return self._sep_token

    @property
    def pad_token(self):
        """ Padding token (string). Log an error if used while not having been set. """
        if self._pad_token is None:
            logger.error("Using pad_token, but it is not set yet.")
        return self._pad_token

    @property
    def cls_token(self):
        """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        if self._cls_token is None:
            logger.error("Using cls_token, but it is not set yet.")
        return self._cls_token

    @property
    def mask_token(self):
        """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        if self._mask_token is None:
            logger.error("Using mask_token, but it is not set yet.")
        return self._mask_token

    @property
    def additional_special_tokens(self):
        """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
        if self._additional_special_tokens is None:
            logger.error("Using additional_special_tokens, but it is not set yet.")
        return self._additional_special_tokens

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    @property
    def bos_token_id(self):
        """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
        if self._bos_token is None:
            logger.error("Using bos_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._bos_token)

    @property
    def eos_token_id(self):
        """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
        if self._eos_token is None:
            logger.error("Using eos_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._eos_token)

    @property
    def unk_token_id(self):
        """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
        if self._unk_token is None:
            logger.error("Using unk_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._unk_token)

    @property
    def sep_token_id(self):
        """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        if self._sep_token is None:
            logger.error("Using sep_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._sep_token)

    @property
    def pad_token_id(self):
        """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """
        if self._pad_token is None:
            logger.error("Using pad_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._pad_token)

    @property
    def cls_token_id(self):
        """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        if self._cls_token is None:
            logger.error("Using cls_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._cls_token)

    @property
    def mask_token_id(self):
        """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        if self._mask_token is None:
            logger.error("Using mask_token, but it is not set yet.")
        return self.convert_tokens_to_ids(self._mask_token)

    @property
    def additional_special_tokens_ids(self):
        """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
        if self._additional_special_tokens is None:
            logger.error("Using additional_special_tokens, but it is not set yet.")
        return self.convert_tokens_to_ids(self._additional_special_tokens)

    def __init__(self, max_len=None, **kwargs):
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._additional_special_tokens = []

        self.max_len = max_len if max_len is not None else int(1e12)

        # Added tokens
        self.added_tokens_encoder = {}
        self.added_tokens_decoder = {}

        # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
        self.init_inputs = ()
        self.init_kwargs = {}

        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == 'additional_special_tokens':
                    assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
                else:
                    assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
                setattr(self, key, value)


    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        r"""
        Instantiate a :class:`~pytorch_transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.

        Args:
            pretrained_model_name_or_path: either:

                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.

            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force to (re-)download the vocabulary files and override the cached versions if they exist.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.

            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.

            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~pytorch_transformers.PreTrainedTokenizer` for details.

        Examples::

            # We can't instantiate the base class `PreTrainedTokenizer` directly, so we show our examples on a derived class: BertTokenizer

            # Download vocabulary from S3 and cache.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')

            # If the tokenizer uses a single vocabulary file, you can point directly to this file
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')

            # You can link tokens to special vocabulary when instantiating
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
            # You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead
            assert tokenizer.unk_token == '<unk>'

        """
        return cls._from_pretrained(*inputs, **kwargs)


    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        proxies = kwargs.pop('proxies', None)

        s3_models = list(cls.max_model_input_sizes.keys())
        vocab_files = {}
        init_configuration = {}
        if pretrained_model_name_or_path in s3_models:
            # Get the vocabulary from AWS S3 bucket
            for file_id, map_list in cls.pretrained_vocab_files_map.items():
                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
            if cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration:
                init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path]
        else:
            # Get the vocabulary from local files
            logger.info(
                "Model name '{}' not found in model shortcut name list ({}). "
                "Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
                    pretrained_model_name_or_path, ', '.join(s3_models),
                    pretrained_model_name_or_path))

            # Look for the tokenizer main vocabulary files
            for file_id, file_name in cls.vocab_files_names.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    # If a directory is provided we look for the standard filenames
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                else:
                    # If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file)
                    full_file_name = pretrained_model_name_or_path
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name

            # Look for the additional tokens files
            additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
                                      'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE,
                                      'tokenizer_config_file': TOKENIZER_CONFIG_FILE,
                                      }

            # If a path to a file was provided, get the parent directory
            saved_directory = pretrained_model_name_or_path
            if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
                saved_directory = os.path.dirname(saved_directory)

            for file_id, file_name in additional_files_names.items():
                full_file_name = os.path.join(saved_directory, file_name)
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name

            if all(full_file_name is None for full_file_name in vocab_files.values()):
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find tokenizer files "
                    "at this path or url.".format(
                        pretrained_model_name_or_path, ', '.join(s3_models),
                        pretrained_model_name_or_path, ))
                return None

        # Get files from url, cache, or disk depending on the case
        try:
            resolved_vocab_files = {}
            for file_id, file_path in vocab_files.items():
                if file_path is None:
                    resolved_vocab_files[file_id] = None
                else:
                    resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies)
        except EnvironmentError as e:
            if pretrained_model_name_or_path in s3_models:
                logger.error("Couldn't reach server to download vocabulary.")
            else:
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find files {} "
                    "at this path or url.".format(
                        pretrained_model_name_or_path, ', '.join(s3_models),
                        pretrained_model_name_or_path, str(vocab_files.keys())))
            raise e

        for file_id, file_path in vocab_files.items():
            if file_path == resolved_vocab_files[file_id]:
                logger.info("loading file {}".format(file_path))
            else:
                logger.info("loading file {} from cache at {}".format(
                    file_path, resolved_vocab_files[file_id]))

        # Prepare tokenizer initialization kwargs
        # Did we save some inputs and kwargs to reload?
        tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
        if tokenizer_config_file is not None:
            init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8"))
            saved_init_inputs = init_kwargs.pop('init_inputs', ())
            if not init_inputs:
                init_inputs = saved_init_inputs
        else:
            init_kwargs = init_configuration

        # Update with newly provided kwargs
        init_kwargs.update(kwargs)

        # Set max length if needed
        if pretrained_model_name_or_path in cls.max_model_input_sizes:
            # if we're using a pretrained model, ensure the tokenizer
            # won't index sequences longer than the number of positional embeddings
            max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
            if max_len is not None and isinstance(max_len, (int, float)):
                init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len)

        # Merge resolved_vocab_files arguments in init_kwargs.
        added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
        special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
        for args_name, file_path in resolved_vocab_files.items():
            if args_name not in init_kwargs:
                init_kwargs[args_name] = file_path
        if special_tokens_map_file is not None:
            special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8"))
            for key, value in special_tokens_map.items():
                if key not in init_kwargs:
                    init_kwargs[key] = value

        # Instantiate tokenizer.
        tokenizer = cls(*init_inputs, **init_kwargs)

        # Save inputs and kwargs for saving and re-loading with ``save_pretrained``
        tokenizer.init_inputs = init_inputs
        tokenizer.init_kwargs = init_kwargs

        # Add supplementary tokens.
        if added_tokens_file is not None:
            added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8"))
            added_tok_decoder = {v:k for k, v in added_tok_encoder.items()}
            tokenizer.added_tokens_encoder.update(added_tok_encoder)
            tokenizer.added_tokens_decoder.update(added_tok_decoder)

        return tokenizer


    def save_pretrained(self, save_directory):
        """ Save the tokenizer vocabulary files together with:
                - added tokens,
                - special-tokens-to-class-attributes-mapping,
                - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).

            This won't save modifications (other than added tokens and the special token mapping) that you may have
            applied to the tokenizer after instantiation (e.g. modifying tokenizer.do_lower_case after creation).

            This method makes sure the full tokenizer can then be re-loaded using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
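
            Example (a minimal sketch; ``BertTokenizer`` and the paths are only illustrative)::

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                tokenizer.save_pretrained('./my_model_directory/')
                # The files written to ./my_model_directory/ can then be re-loaded with:
                tokenizer = BertTokenizer.from_pretrained('./my_model_directory/')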
        """
        if not os.path.isdir(save_directory):
            logger.error("Saving directory ({}) should be a directory".format(save_directory))
            return

        special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
        added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
        tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)

        tokenizer_config = copy.deepcopy(self.init_kwargs)
        tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
        for file_id in self.vocab_files_names.keys():
            tokenizer_config.pop(file_id, None)

        with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(tokenizer_config, ensure_ascii=False))

        with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))

        with open(added_tokens_file, 'w', encoding='utf-8') as f:
            if self.added_tokens_encoder:
                out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
            else:
                out_str = u"{}"
            f.write(out_str)

        vocab_files = self.save_vocabulary(save_directory)

        return vocab_files + (special_tokens_map_file, added_tokens_file)


    def save_vocabulary(self, save_directory):
        """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
            and special token mappings.

            Please use :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` to save the full Tokenizer state if you want to reload it using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
        """
        raise NotImplementedError


    def vocab_size(self):
        """ Size of the base vocabulary (without the added tokens) """
        raise NotImplementedError


    def __len__(self):
        """ Size of the full vocabulary with the added tokens """
        return self.vocab_size + len(self.added_tokens_encoder)


    def add_tokens(self, new_tokens):
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from the length of the current vocabulary.

        Args:
            new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
        """
        if not new_tokens:
            return 0

        to_add_tokens = []
        for token in new_tokens:
            assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode))
            if token != self.unk_token and \
                    self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token):
                to_add_tokens.append(token)
                logger.info("Adding %s to the vocabulary", token)

        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
        added_tok_decoder = {v:k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)

        return len(to_add_tokens)


    def add_special_tokens(self, special_tokens_dict):
        """
        Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
        to class attributes. If special tokens are NOT in the vocabulary, they are added
        to it (indexed starting from the last index of the current vocabulary).

        Using `add_special_tokens` will ensure your special tokens can be used in several ways:

        - special tokens are carefully handled by the tokenizer (they are never split)
        - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.

        When possible, special tokens are already registered for provided pretrained models (e.g. BertTokenizer's cls_token is already registered as '[CLS]' and XLM's is registered as '</s>').

        Args:
            special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
                [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
                ``additional_special_tokens``].

                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to add a new classification token to GPT-2
            tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            model = GPT2Model.from_pretrained('gpt2')

            special_tokens_dict = {'cls_token': '<CLS>'}

            num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.

            assert tokenizer.cls_token == '<CLS>'
        """
        if not special_tokens_dict:
            return 0

        added_tokens = 0
        for key, value in special_tokens_dict.items():
            assert key in self.SPECIAL_TOKENS_ATTRIBUTES
            if key == 'additional_special_tokens':
                assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
                added_tokens += self.add_tokens(value)
            else:
                assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
                added_tokens += self.add_tokens([value])
            logger.info("Assigning %s to the %s key of the tokenizer", value, key)
            setattr(self, key, value)

        return added_tokens

    def tokenize(self, text, **kwargs):
        """ Converts a string into a sequence of tokens (strings), using the tokenizer.
            Splits into words for word-based vocabularies or into sub-words for sub-word-based
            vocabularies (BPE/SentencePiece/WordPiece).

            Takes care of added tokens.
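
            Example (illustrative; assumes a derived class such as BertTokenizer, and the exact sub-tokens depend on its vocabulary)::

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                tokens = tokenizer.tokenize("Hello world!")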
        """
        def split_on_token(tok, text):
            result = []
            split_text = text.split(tok)
            for i, sub_text in enumerate(split_text):
                sub_text = sub_text.strip()
                if i == 0 and not sub_text:
                    result += [tok]
                elif i == len(split_text) - 1:
                    if sub_text:
                        result += [sub_text]
                    else:
                        pass
                else:
                    if sub_text:
                        result += [sub_text]
                    result += [tok]
            return result

        def split_on_tokens(tok_list, text):
            if not text:
                return []
            if not tok_list:
                return self._tokenize(text, **kwargs)

            tokenized_text = []
            text_list = [text]
            for tok in tok_list:
                tokenized_text = []
                for sub_text in text_list:
                    if sub_text not in self.added_tokens_encoder \
                            and sub_text not in self.all_special_tokens:
                        tokenized_text += split_on_token(tok, sub_text)
                    else:
                        tokenized_text += [sub_text]
                text_list = tokenized_text

            return sum((self._tokenize(token, **kwargs) if token not \
                    in self.added_tokens_encoder and token not in self.all_special_tokens \
                    else [token] for token in tokenized_text), [])

        added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
        tokenized_text = split_on_tokens(added_tokens, text)
        return tokenized_text

    def _tokenize(self, text, **kwargs):
        """ Converts a string into a sequence of tokens (strings), using the tokenizer.
            Splits into words for word-based vocabularies or into sub-words for sub-word-based
            vocabularies (BPE/SentencePiece/WordPiece).

            Does NOT take care of added tokens.
        """
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        """ Converts a single token, or a sequence of tokens, (str/unicode) into a single integer id
            (resp. a sequence of ids), using the vocabulary.
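
            Example (illustrative; the returned ids depend on the loaded vocabulary)::

                tokens = tokenizer.tokenize("Hello world!")
                ids = tokenizer.convert_tokens_to_ids(tokens)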
        """
        if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
            return self._convert_token_to_id_with_added_voc(tokens)

        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        if len(ids) > self.max_len:
            logger.warning("Token indices sequence length is longer than the specified maximum sequence length "
                           "for this model ({} > {}). Running this sequence through the model will result in "
                           "indexing errors".format(len(ids), self.max_len))
        return ids

    def _convert_token_to_id_with_added_voc(self, token):
        if token in self.added_tokens_encoder:
            return self.added_tokens_encoder[token]
        return self._convert_token_to_id(token)

    def _convert_token_to_id(self, token):
        raise NotImplementedError

    def encode(self, text, text_pair=None, add_special_tokens=False, **kwargs):
        """
        Converts a string into a sequence of ids (integers), using the tokenizer and vocabulary.
        
        Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.

        Args:
            text: The first sequence to be encoded.
            text_pair: Optional second sequence to be encoded.
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            **kwargs: passed to the `self.tokenize()` method
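
            Example (a minimal sketch; assumes a derived class such as BertTokenizer, and the ids depend on its vocabulary)::

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                ids = tokenizer.encode("Hello world!", add_special_tokens=True)
                # Without special tokens this is equivalent to:
                # tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world!"))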
        """
        if is_tf_available():
            is_tf_tensor = False
            if isinstance(text, tf.Tensor):
                text = text.numpy()
                is_tf_tensor = True
            if isinstance(text, bytes):
                text = text.decode('utf-8')

        if text_pair is None:
            if add_special_tokens:
                output = self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text, **kwargs)))
            else:
                output = self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
        else:
            first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text, **kwargs)]
            second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair, **kwargs)]

            if add_special_tokens:
                output = self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
            else:
                output = first_sentence_tokens, second_sentence_tokens

        if is_tf_available() and is_tf_tensor:
            output = tf.constant(output)

        return output

    def add_special_tokens_single_sentence(self, token_ids):
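        """ Build model inputs from a single sequence of token ids.
            This base class implementation returns the token ids unchanged (and logs a warning);
            derived classes that use special tokens (e.g. BertTokenizer) override it to add them.
        """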
        logger.warning("This tokenizer does not make use of special tokens. The sequence has been returned with no modification.")
        return token_ids

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
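        """ Build model inputs from a pair of sequences of token ids.
            This base class implementation simply concatenates the two lists (and logs a warning);
            derived classes that use special tokens override it to add them.
        """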
        logger.warning("This tokenizer does not make use of special tokens. The two sequences have been concatenated.")
        return token_ids_0 + token_ids_1

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """ Converts a single index or a sequence of indices (integers) into a token
            (resp. a sequence of tokens) (str/unicode), using the vocabulary and added tokens.

            Args:
                skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
        """
        if isinstance(ids, int):
            if ids in self.added_tokens_decoder:
                return self.added_tokens_decoder[ids]
            else:
                return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            if skip_special_tokens and index in self.all_special_ids:
                continue
            if index in self.added_tokens_decoder:
                tokens.append(self.added_tokens_decoder[index])
            else:
                tokens.append(self._convert_id_to_token(index))
        return tokens

    def _convert_id_to_token(self, index):
        raise NotImplementedError

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) into a single string.
            The simplest way to do it is ``' '.join(tokens)``,
            but we often want to remove sub-word tokenization artifacts at the same time.
        """
        return ' '.join(tokens)

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
        """
        Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
        with options to remove special tokens and clean up tokenization spaces.
        Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
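
        Example (illustrative; the exact string depends on the derived tokenizer)::

            ids = tokenizer.encode("Hello world!")
            text = tokenizer.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)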
        """
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/pytorch-transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_tokens:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(" " + token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        text = ''.join(sub_texts)

        if self._sep_token is not None and self._sep_token in text:
            text = text.replace(self._cls_token, self._sep_token)
            split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self._sep_token)))
            if clean_up_tokenization_spaces:
                clean_text = [self.clean_up_tokenization(text) for text in split_text]
                return clean_text
            else:
                return split_text
        else:
            if clean_up_tokenization_spaces:
                clean_text = self.clean_up_tokenization(text)
                return clean_text
            else:
                return text

    @property
    def special_tokens_map(self):
        """ A dictionary mapping special token class attribute (cls_token, unk_token...) to their
            values ('<unk>', '<cls>'...)
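
            For example, a Bert-like tokenizer would typically return something like
            ``{'unk_token': '[UNK]', 'sep_token': '[SEP]', 'cls_token': '[CLS]', ...}``.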
        """
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, "_" + attr)
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self):
        """ List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
            (cls_token, unk_token...).
        """
        all_toks = []
        set_attr = self.special_tokens_map
        for attr_value in set_attr.values():
            all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
        all_toks = list(set(all_toks))
        return all_toks

    @property
    def all_special_ids(self):
        """ List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
            class attributes (cls_token, unk_token...).
        """
        all_toks = self.all_special_tokens
        all_ids = list(self._convert_token_to_id(t) for t in all_toks)
        return all_ids

    @staticmethod
    def clean_up_tokenization(out_string):
        """ Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
        """
        out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
                        ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
                        ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
        return out_string