# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library).
    For slow (python) tokenizers see tokenization_utils.py
"""

import copy
import os
import warnings
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union

from tokenizers import Encoding as EncodingFast
from tokenizers import Tokenizer as TokenizerFast
from tokenizers.decoders import Decoder as DecoderFast

from .convert_slow_tokenizer import convert_slow_tokenizer
from .file_utils import add_end_docstrings
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import (
    INIT_TOKENIZER_DOCSTRING,
    AddedToken,
    BatchEncoding,
    PaddingStrategy,
    PreTokenizedInput,
    PreTokenizedInputPair,
    PreTrainedTokenizerBase,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from .utils import logging


logger = logging.get_logger(__name__)


# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file
TOKENIZER_FILE = "tokenizer.json"
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"

# Slow tokenizers have an additional added tokens file
ADDED_TOKENS_FILE = "added_tokens.json"


@add_end_docstrings(
    INIT_TOKENIZER_DOCSTRING,
    """
    .. automethod:: __call__
    """,
)
class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
    """
    Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).

    Inherits from :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase`.

    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.

    This class also contains the added tokens in a unified way on top of all tokenizers so we don't
    have to handle the specific vocabulary augmentation methods of the various underlying
    dictionary structures (BPE, sentencepiece...).
    """

    slow_tokenizer_class: PreTrainedTokenizer = None

    def __init__(self, *args, **kwargs):
        # We instantiate fast tokenizers based on a slow tokenizer for now
        # In the future we'll also use a direct way based on saving/instantiating
        # tokenizer's Tokenizer directly from its serialization JSON
        if "__slow_tokenizer" in kwargs and kwargs["__slow_tokenizer"]:
            slow_tokenizer = kwargs.pop("__slow_tokenizer")
        else:
            slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs)
        self._tokenizer = convert_slow_tokenizer(slow_tokenizer)

        kwargs = copy.deepcopy(slow_tokenizer.init_kwargs)

        # We call this after having initialized the backend tokenizer because we update it.
        super().__init__(**kwargs)

    @property
    def is_fast(self) -> bool:
        return True

    @property
    def vocab_size(self) -> int:
        """
        :obj:`int`: Size of the base vocabulary (without the added tokens).
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=False)

    def get_vocab(self) -> Dict[str, int]:
        """
        Returns the vocabulary as a dictionary of token to index.

        :obj:`tokenizer.get_vocab()[token]` is equivalent to :obj:`tokenizer.convert_tokens_to_ids(token)` when
        :obj:`token` is in the vocab.

        Returns:
            :obj:`Dict[str, int]`: The vocabulary.
        """
        return self._tokenizer.get_vocab(with_added_tokens=True)
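
    # Usage sketch (illustrative comment only, not executed): assuming a concrete subclass such as
    # BertTokenizerFast loaded from a pretrained checkpoint, ``get_vocab()`` and
    # ``convert_tokens_to_ids`` agree for any token that is already in the vocabulary:
    #
    #     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    #     vocab = tokenizer.get_vocab()
    #     assert vocab["hello"] == tokenizer.convert_tokens_to_ids("hello")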

    def get_added_vocab(self) -> Dict[str, int]:
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index.

        Returns:
            :obj:`Dict[str, int]`: The added tokens.
        """
        base_vocab = self._tokenizer.get_vocab(with_added_tokens=False)
        full_vocab = self._tokenizer.get_vocab(with_added_tokens=True)
        added_vocab = {tok: index for tok, index in full_vocab.items() if tok not in base_vocab}
        return added_vocab
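
    # Usage sketch (illustrative comment only): tokens registered through ``add_tokens`` show up in the
    # added vocabulary, and the full vocabulary size is the base size plus the added tokens:
    #
    #     tokenizer.add_tokens(["new_tok"])  # returns the number of tokens actually added
    #     assert "new_tok" in tokenizer.get_added_vocab()
    #     assert len(tokenizer) == tokenizer.vocab_size + len(tokenizer.get_added_vocab())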

    def __len__(self) -> int:
        """
        Size of the full vocabulary with the added tokens.
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=True)

    @property
    def backend_tokenizer(self) -> TokenizerFast:
        """
        :obj:`tokenizers.Tokenizer`: The Rust tokenizer used as a backend.
        """
        return self._tokenizer

    @property
    def decoder(self) -> DecoderFast:
        """
        :obj:`tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer.
        """
        return self._tokenizer.decoder

    def _convert_encoding(
        self,
        encoding: EncodingFast,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> Dict[str, Any]:
        """Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict.

        Overflowing tokens are converted to additional examples (like batches) so the output values of
        the dict are lists (overflows) of lists (tokens).

        Output shape: (overflows, sequence length)
        """
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_overflowing_tokens and encoding.overflowing is not None:
            encodings = [encoding] + encoding.overflowing
        else:
            encodings = [encoding]

        encoding_dict = defaultdict(list)
        for e in encodings:
            encoding_dict["input_ids"].append(e.ids)

            if return_token_type_ids:
                encoding_dict["token_type_ids"].append(e.type_ids)
            if return_attention_mask:
                encoding_dict["attention_mask"].append(e.attention_mask)
            if return_special_tokens_mask:
                encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
            if return_offsets_mapping:
                encoding_dict["offset_mapping"].append(e.offsets)
            if return_length:
                encoding_dict["length"].append(len(e.ids))

        return encoding_dict
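
    # Illustrative note: with truncation enabled and ``return_overflowing_tokens=True``, an input that
    # overflows yields several windows, e.g. ``encoding_dict["input_ids"] == [[...], [...], ...]``,
    # while an input that fits yields a single-element outer list.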

    def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
        """
        Converts a token string (or a sequence of tokens) into a single integer id (or a sequence of ids), using
        the vocabulary.

        Args:
            tokens (:obj:`str` or :obj:`List[str]`): One or several token(s) to convert to token id(s).

        Returns:
            :obj:`int` or :obj:`List[int]`: The token id or list of token ids.
        """
        if tokens is None:
            return None

        if isinstance(tokens, str):
            return self._convert_token_to_id_with_added_voc(tokens)

        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        return ids

    def _convert_token_to_id_with_added_voc(self, token: str) -> int:
        index = self._tokenizer.token_to_id(token)
        if index is None:
            return self.unk_token_id
        return index

    def _convert_id_to_token(self, index: int) -> Optional[str]:
        return self._tokenizer.id_to_token(int(index))

    def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int:
        if special_tokens:
            return self._tokenizer.add_special_tokens(new_tokens)

        return self._tokenizer.add_tokens(new_tokens)

    def num_special_tokens_to_add(self, pair: bool = False) -> int:
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        .. note::
            This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not
            put this inside your training loop.

        Args:
            pair (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether the number of added tokens should be computed in the case of a sequence pair or a single
                sequence.

        Returns:
            :obj:`int`: Number of special tokens added to sequences.
        """
        return self._tokenizer.num_special_tokens_to_add(pair)
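
    # Usage sketch (illustrative comment only): for a BERT-style tokenizer that wraps sequences in
    # [CLS] ... [SEP] (and appends an extra [SEP] for pairs), one would typically see:
    #
    #     tokenizer.num_special_tokens_to_add(pair=False)  # 2
    #     tokenizer.num_special_tokens_to_add(pair=True)   # 3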

    def convert_ids_to_tokens(
        self, ids: Union[int, List[int]], skip_special_tokens: bool = False
    ) -> Union[str, List[str]]:
        """
        Converts a single index or a sequence of indices into a token or a sequence of tokens, using the vocabulary
        and added tokens.

        Args:
            ids (:obj:`int` or :obj:`List[int]`):
                The token id (or token ids) to convert to tokens.
            skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to remove special tokens in the decoding.

        Returns:
            :obj:`str` or :obj:`List[str]`: The decoded token(s).
        """
        if isinstance(ids, int):
            return self._tokenizer.id_to_token(ids)
        tokens = []
        for index in ids:
            index = int(index)
            if skip_special_tokens and index in self.all_special_ids:
                continue
            tokens.append(self._tokenizer.id_to_token(index))
        return tokens
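
    # Usage sketch (illustrative comment only): ids round-trip back to tokens, and ids listed in
    # ``self.all_special_ids`` can be dropped with ``skip_special_tokens=True``:
    #
    #     ids = tokenizer.convert_tokens_to_ids(["hello", "world"])
    #     tokenizer.convert_ids_to_tokens(ids)  # ["hello", "world"], assuming both are in the vocab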

    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False) -> List[str]:
        """
        Converts a string into a sequence of tokens, using the backend Rust tokenizer.

        Note that, unlike slow tokenizers (instances of :class:`~transformers.PreTrainedTokenizer`), this method
        will replace the unknown tokens with the :obj:`unk_token`.

        Args:
            text (:obj:`str`):
                The sequence to be encoded.
            pair (:obj:`str`, `optional`):
                A second sequence to be encoded with the first.
            add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to add the special tokens associated with the corresponding model.

        Returns:
            :obj:`List[str]`: The list of tokens.
        """
        return self._tokenizer.encode(text, pair, add_special_tokens=add_special_tokens).tokens
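
    # Usage sketch (illustrative comment only): unlike slow tokenizers, pieces that are not in the
    # vocabulary already come back as ``tokenizer.unk_token`` here. For a BERT-style tokenizer:
    #
    #     tokenizer.tokenize("hello world")                     # e.g. ["hello", "world"]
    #     tokenizer.tokenize("hello", add_special_tokens=True)  # e.g. ["[CLS]", "hello", "[SEP]"]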

    def set_truncation_and_padding(
        self,
        padding_strategy: PaddingStrategy,
        truncation_strategy: TruncationStrategy,
        max_length: int,
        stride: int,
        pad_to_multiple_of: Optional[int],
    ):
        """
        Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers
        library) and restore the tokenizer settings afterwards.

        The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer had
        a padding / truncation strategy set before, it will be reset to no padding / truncation when exiting the
        managed section.

        Args:
            padding_strategy (:class:`~transformers.tokenization_utils_base.PaddingStrategy`):
                The kind of padding that will be applied to the input.
            truncation_strategy (:class:`~transformers.tokenization_utils_base.TruncationStrategy`):
                The kind of truncation that will be applied to the input.
            max_length (:obj:`int`):
                The maximum size of a sequence.
            stride (:obj:`int`):
                The stride to use when handling overflow.
            pad_to_multiple_of (:obj:`int`, `optional`):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
        """
        # Set truncation and padding on the backend tokenizer
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
            self._tokenizer.enable_truncation(max_length, stride=stride, strategy=truncation_strategy.value)
        else:
            self._tokenizer.no_truncation()

        if padding_strategy != PaddingStrategy.DO_NOT_PAD:
            self._tokenizer.enable_padding(
                length=max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None,
                direction=self.padding_side,
                pad_id=self.pad_token_id,
                pad_type_id=self.pad_token_type_id,
                pad_token=self.pad_token,
                pad_to_multiple_of=pad_to_multiple_of,
            )
        else:
            self._tokenizer.no_padding()
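
    # Usage sketch (illustrative comment only) with arbitrary example settings; this configures the
    # backend before the low-level encode calls made by ``_batch_encode_plus``:
    #
    #     tokenizer.set_truncation_and_padding(
    #         padding_strategy=PaddingStrategy.MAX_LENGTH,
    #         truncation_strategy=TruncationStrategy.LONGEST_FIRST,
    #         max_length=128,
    #         stride=0,
    #         pad_to_multiple_of=8,  # e.g. to target Tensor Cores
    #     )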

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:

        if not isinstance(batch_text_or_text_pairs, list):
            raise TypeError(
                "batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs))
            )

        if "is_pretokenized" in kwargs:
            warnings.warn(
                "`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.",
                FutureWarning,
            )
            is_split_into_words = kwargs.pop("is_pretokenized")

        if kwargs:
            raise ValueError(f"Keyword arguments {kwargs} not recognized.")

        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
        )

        # Avoid thread overhead if only one example.
        if len(batch_text_or_text_pairs) == 1:
            if isinstance(batch_text_or_text_pairs[0], tuple):
                # We got a Tuple with a pair of sequences
                encodings = self._tokenizer.encode(
                    *batch_text_or_text_pairs[0],
                    add_special_tokens=add_special_tokens,
                    is_pretokenized=is_split_into_words,
                )
            else:
                # We got a single sequence
                encodings = self._tokenizer.encode(
                    batch_text_or_text_pairs[0],
                    add_special_tokens=add_special_tokens,
                    is_pretokenized=is_split_into_words,
                )
            encodings = [encodings]
        else:
            encodings = self._tokenizer.encode_batch(
                batch_text_or_text_pairs,
                add_special_tokens=add_special_tokens,
                is_pretokenized=is_split_into_words,
            )

        # Convert encoding to dict
        # `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]]
        # with nested dimensions corresponding to batch, overflows, sequence length
        tokens = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]

        # Convert the output to have dict[list] from list[dict]
        sanitized = {}
        for key in tokens[0].keys():
            # To List[List[List[int]]] of shape (batch, overflows, sequence length)
            stack = [e for item in tokens for e in item[key]]
            sanitized[key] = stack

        # If returning overflowing tokens, we need to return a mapping
        # from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, enc in enumerate(tokens):
                overflow_to_sample_mapping += [i] * len(enc["input_ids"])
            sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping

        return BatchEncoding(sanitized, encodings, tensor_type=return_tensors)
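
    # Usage sketch (illustrative comment only) through the public ``__call__`` API, which funnels into
    # this method: windows produced by a long sample are tracked via ``overflow_to_sample_mapping``:
    #
    #     enc = tokenizer(
    #         ["a fairly long first document ...", "short second text"],
    #         truncation=True, max_length=16, stride=4, return_overflowing_tokens=True,
    #     )
    #     enc["overflow_to_sample_mapping"]  # e.g. [0, 0, 0, 1]: three windows for sample 0, one for sample 1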

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:
        if "is_pretokenized" in kwargs:
            warnings.warn(
                "`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.",
                FutureWarning,
            )
            is_split_into_words = kwargs.pop("is_pretokenized")

        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_output = self._batch_encode_plus(
            batched_input,
            is_split_into_words=is_split_into_words,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        # If return_tensors is None, we can remove the leading batch axis.
        # Overflowing tokens are returned as a batch of outputs, so we keep the batch axis in this case.
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )

        return batched_output
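
    # Illustrative note: for a single example with ``return_tensors=None`` the leading batch axis is
    # dropped, while tensor outputs keep it (assuming the corresponding framework is installed):
    #
    #     tokenizer("hello world")["input_ids"]                       # flat list of ids
    #     tokenizer("hello world", return_tensors="pt")["input_ids"]  # tensor of shape (1, sequence_length)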

    def decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = True,
        **kwargs
    ) -> str:
        """
        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove
        special tokens and clean up tokenization spaces.

        Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.

        Args:
            token_ids (:obj:`Union[int, List[int]]`):
                List of tokenized input ids. Can be obtained using the ``__call__`` method.
            skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to clean up the tokenization spaces.

        Returns:
            :obj:`str`: The decoded sentence.
        """
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
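
    # Usage sketch (illustrative comment only): encode/decode roughly round-trip, modulo special tokens
    # and tokenization cleanup. For a BERT-style tokenizer:
    #
    #     ids = tokenizer("hello world")["input_ids"]
    #     tokenizer.decode(ids)                            # e.g. "[CLS] hello world [SEP]"
    #     tokenizer.decode(ids, skip_special_tokens=True)  # e.g. "hello world"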

    def save_vocabulary(self, save_directory: str) -> Tuple[str]:
        """
        Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
        and special token mappings.

        .. warning::
            Please use :meth:`~transformers.PreTrainedTokenizerFast.save_pretrained` to save the full tokenizer state if
            you want to reload it using the :meth:`~transformers.PreTrainedTokenizerFast.from_pretrained` class method.

        Args:
            save_directory (:obj:`str`): The path to a directory where the tokenizer will be saved.

        Returns:
            A tuple of :obj:`str`: The files saved.
        """
        if os.path.isdir(save_directory):
            files = self._tokenizer.model.save(save_directory)
        else:
            folder, file = os.path.split(os.path.abspath(save_directory))
            files = self._tokenizer.model.save(folder, name=file)

        return tuple(files)
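
    # Usage sketch (illustrative comment only): ``save_vocabulary`` writes only the model's vocabulary
    # files, while ``save_pretrained`` (inherited from the base class) also writes the special tokens
    # map and the tokenizer configuration so the tokenizer can be reloaded with ``from_pretrained``:
    #
    #     tokenizer.save_vocabulary("/tmp/tok")   # vocabulary files only
    #     tokenizer.save_pretrained("/tmp/tok")   # full reloadable state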