"...git@developer.sourcefind.cn:dcuai/dlexamples.git" did not exist on "82496fd438242f3904c61d2f2254913eaeb4b8e9"
Unverified commit 79eb3915, authored by Thomas Wolf and committed by GitHub

[tokenizers] Fixing #8001 - Adding tests on tokenizers serialization (#8006)

* fixing #8001

* make T5 tokenizer serialization more robust - style
parent 7087d9b1
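The common pattern across the hunks below is that each slow tokenizer now forwards every `__init__` argument to the base class, which records them in `init_kwargs` (the dictionary `save_pretrained` serializes into `tokenizer_config.json`), so a reload restores the same configuration. A minimal toy sketch of the failure mode being fixed; the `Toy*` classes are illustrative stand-ins, not transformers code:

```python
# Illustrative sketch only, not transformers code: the Toy* classes stand in for
# PreTrainedTokenizer and a concrete subclass, to show why every argument must be
# forwarded to the base __init__ for serialization to round-trip.
import json


class ToyBaseTokenizer:
    def __init__(self, **kwargs):
        # The base class can only persist what it actually receives.
        self.init_kwargs = dict(kwargs)

    def save_config(self, path):
        with open(path, "w") as f:
            json.dump(self.init_kwargs, f)


class ToyTokenizer(ToyBaseTokenizer):
    def __init__(self, do_lower_case=True, unk_token="[UNK]", **kwargs):
        # Forwarding do_lower_case is exactly the kind of line this commit adds;
        # without it the setting is silently absent from the saved config.
        super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, **kwargs)
        self.do_lower_case = do_lower_case


tok = ToyTokenizer(do_lower_case=False)
tok.save_config("toy_tokenizer_config.json")
with open("toy_tokenizer_config.json") as f:
    print(json.load(f))  # {'do_lower_case': False, 'unk_token': '[UNK]'}
```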
@@ -129,6 +129,9 @@ class AlbertTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
+            do_lower_case=do_lower_case,
+            remove_space=remove_space,
+            keep_accents=keep_accents,
             bos_token=bos_token,
             eos_token=eos_token,
             unk_token=unk_token,
...
@@ -178,11 +178,16 @@ class BertTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
+            do_lower_case=do_lower_case,
+            do_basic_tokenize=do_basic_tokenize,
+            never_split=never_split,
             unk_token=unk_token,
             sep_token=sep_token,
             pad_token=pad_token,
             cls_token=cls_token,
             mask_token=mask_token,
+            tokenize_chinese_chars=tokenize_chinese_chars,
+            strip_accents=strip_accents,
             **kwargs,
         )
...
@@ -129,11 +129,12 @@ class BertweetTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
+            normalization=normalization,
             bos_token=bos_token,
             eos_token=eos_token,
+            unk_token=unk_token,
             sep_token=sep_token,
             cls_token=cls_token,
-            unk_token=unk_token,
             pad_token=pad_token,
             mask_token=mask_token,
             **kwargs,
...
@@ -308,16 +308,13 @@ class GPT2Tokenizer(object):
         - We remapped the token ids in our dictionary with regarding to the new special tokens, `[PAD]` => 0, `[CLS]` => 1, `[SEP]` => 2, `[UNK]` => 3, `[MASK]` => 50264

-        do_lower_case (:obj:`bool`, optional):
-            Whether to convert inputs to lower case. **Not used in GPT2 tokenizer**.
         special_tokens (:obj:`list`, optional):
             List of special tokens to be added to the end of the vocabulary.
     """

-    def __init__(self, vocab_file=None, do_lower_case=True, special_tokens=None):
+    def __init__(self, vocab_file=None, special_tokens=None):
         self.pad_token = "[PAD]"
         self.sep_token = "[SEP]"
         self.unk_token = "[UNK]"
@@ -523,6 +520,7 @@ class DebertaTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
+            do_lower_case=do_lower_case,
             unk_token=unk_token,
             sep_token=sep_token,
             pad_token=pad_token,
...
@@ -194,6 +194,9 @@ class FSMTTokenizer(PreTrainedTokenizer):
     ):
         super().__init__(
             langs=langs,
+            src_vocab_file=src_vocab_file,
+            tgt_vocab_file=tgt_vocab_file,
+            merges_file=merges_file,
             unk_token=unk_token,
             bos_token=bos_token,
             sep_token=sep_token,
...
@@ -164,7 +164,14 @@ class GPT2Tokenizer(PreTrainedTokenizer):
         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
+        super().__init__(
+            errors=errors,
+            unk_token=unk_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            add_prefix_space=add_prefix_space,
+            **kwargs,
+        )

         with open(vocab_file, encoding="utf-8") as vocab_handle:
             self.encoder = json.load(vocab_handle)
...
@@ -97,10 +97,12 @@ class MarianTokenizer(PreTrainedTokenizer):
     ):
         super().__init__(
             # bos_token=bos_token, unused. Start decoding with config.decoder_start_token_id
-            model_max_length=model_max_length,
-            eos_token=eos_token,
+            source_lang=source_lang,
+            target_lang=target_lang,
             unk_token=unk_token,
+            eos_token=eos_token,
             pad_token=pad_token,
+            model_max_length=model_max_length,
             **kwargs,
         )
         assert Path(source_spm).exists(), f"cannot find spm source {source_spm}"
...
@@ -119,11 +119,16 @@ class ProphetNetTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
+            do_lower_case=do_lower_case,
+            do_basic_tokenize=do_basic_tokenize,
+            never_split=never_split,
             unk_token=unk_token,
             sep_token=sep_token,
+            x_sep_token=x_sep_token,
             pad_token=pad_token,
             mask_token=mask_token,
-            x_sep_token=x_sep_token,
+            tokenize_chinese_chars=tokenize_chinese_chars,
+            strip_accents=strip_accents,
             **kwargs,
         )
         self.unique_no_split_tokens.append(x_sep_token)
...
@@ -112,15 +112,22 @@ class T5Tokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         # Add extra_ids to the special token list
-        if extra_ids > 0:
-            if additional_special_tokens is None:
-                additional_special_tokens = []
-            additional_special_tokens.extend(["<extra_id_{}>".format(i) for i in range(extra_ids)])
+        if extra_ids > 0 and additional_special_tokens is None:
+            additional_special_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)]
+        elif extra_ids > 0 and additional_special_tokens is not None:
+            # Check that we have the right number of extra_id special tokens
+            extra_tokens = len(set(filter(lambda x: bool("extra_id" in x), additional_special_tokens)))
+            if extra_tokens != extra_ids:
+                raise ValueError(
+                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. "
+                    "In this case the additional_special_tokens must include the extra_ids tokens"
+                )

         super().__init__(
             eos_token=eos_token,
             unk_token=unk_token,
             pad_token=pad_token,
+            extra_ids=extra_ids,
             additional_special_tokens=additional_special_tokens,
             **kwargs,
         )
...
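The new `extra_ids` handling above (duplicated for the fast tokenizer below) can be read as a small standalone function. A hedged restatement for illustration: `resolve_extra_ids` is an invented name, the commit keeps this logic inline in `T5Tokenizer.__init__` and `T5TokenizerFast.__init__`.

```python
# Standalone restatement of the check added above; resolve_extra_ids is an invented
# helper name, used here only so the behaviour is visible without loading a real T5 model.
def resolve_extra_ids(extra_ids, additional_special_tokens):
    if extra_ids > 0 and additional_special_tokens is None:
        additional_special_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)]
    elif extra_ids > 0 and additional_special_tokens is not None:
        # Count how many of the user-supplied special tokens are extra_id sentinels.
        extra_tokens = len(set(filter(lambda x: "extra_id" in x, additional_special_tokens)))
        if extra_tokens != extra_ids:
            raise ValueError(
                f"Both extra_ids ({extra_ids}) and additional_special_tokens "
                f"({additional_special_tokens}) are provided to T5Tokenizer. "
                "In this case the additional_special_tokens must include the extra_ids tokens"
            )
    return additional_special_tokens


print(resolve_extra_ids(2, None))                                     # ['<extra_id_0>', '<extra_id_1>']
print(resolve_extra_ids(2, ["<extra_id_0>", "<extra_id_1>", "<x>"]))  # accepted: both sentinels present
# resolve_extra_ids(2, ["<x>"]) would raise ValueError: the expected sentinels are missing
```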
@@ -126,6 +126,18 @@ class T5TokenizerFast(PreTrainedTokenizerFast):
         additional_special_tokens=None,
         **kwargs
     ):
+        # Add extra_ids to the special token list
+        if extra_ids > 0 and additional_special_tokens is None:
+            additional_special_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)]
+        elif extra_ids > 0 and additional_special_tokens is not None:
+            # Check that we have the right number of extra special tokens
+            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in x), additional_special_tokens)))
+            if extra_tokens != extra_ids:
+                raise ValueError(
+                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. "
+                    "In this case the additional_special_tokens must include the extra_ids tokens"
+                )
+
         super().__init__(
             vocab_file,
             tokenizer_file=tokenizer_file,
@@ -137,13 +149,6 @@ class T5TokenizerFast(PreTrainedTokenizerFast):
             **kwargs,
         )

-        if extra_ids > 0:
-            all_extra_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)]
-            if all(tok not in self.additional_special_tokens for tok in all_extra_tokens):
-                self.additional_special_tokens = self.additional_special_tokens + [
-                    "<extra_id_{}>".format(i) for i in range(extra_ids)
-                ]
-
         self.vocab_file = vocab_file
         self._extra_ids = extra_ids
...
@@ -164,7 +164,19 @@ class TransfoXLTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
-            unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, **kwargs
+            special=special,
+            min_freq=min_freq,
+            max_size=max_size,
+            lower_case=lower_case,
+            delimiter=delimiter,
+            vocab_file=vocab_file,
+            pretrained_vocab_file=pretrained_vocab_file,
+            never_split=never_split,
+            unk_token=unk_token,
+            eos_token=eos_token,
+            additional_special_tokens=additional_special_tokens,
+            language=language,
+            **kwargs,
         )

         if never_split is None:
...
@@ -1673,7 +1673,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin):
         if (
             "tokenizer_file" not in resolved_vocab_files or resolved_vocab_files["tokenizer_file"] is None
         ) and cls.slow_tokenizer_class is not None:
-            slow_tokenizer = cls.slow_tokenizer_class._from_pretrained(
+            slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
                 copy.deepcopy(resolved_vocab_files),
                 pretrained_model_name_or_path,
                 copy.deepcopy(init_configuration),
...
@@ -16,7 +16,6 @@
 For slow (python) tokenizers see tokenization_utils.py
 """

-import copy
 import json
 import os
 import warnings
@@ -105,7 +104,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
         self._tokenizer = fast_tokenizer

         if slow_tokenizer is not None:
-            kwargs = copy.deepcopy(slow_tokenizer.init_kwargs)
+            kwargs.update(slow_tokenizer.init_kwargs)

         # We call this after having initialized the backend tokenizer because we update it.
         super().__init__(**kwargs)
...
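For the `PreTrainedTokenizerFast` change above: replacing `kwargs = copy.deepcopy(slow_tokenizer.init_kwargs)` with `kwargs.update(slow_tokenizer.init_kwargs)` merges the slow tokenizer's recorded settings into the caller's kwargs instead of discarding the caller's values. A plain-dict sketch of the difference, with invented example values:

```python
# Plain-dict sketch, values invented: the old code replaced the caller's kwargs with a
# deep copy of the slow tokenizer's init_kwargs; the new code merges the two dicts.
import copy

kwargs = {"model_max_length": 512, "padding_side": "left"}            # passed by the caller
slow_init_kwargs = {"do_lower_case": False, "padding_side": "right"}  # recorded on the slow tokenizer

old_behaviour = copy.deepcopy(slow_init_kwargs)  # caller's model_max_length is dropped
new_behaviour = dict(kwargs)
new_behaviour.update(slow_init_kwargs)           # both kept; the slow tokenizer wins on conflicts

print(old_behaviour)  # {'do_lower_case': False, 'padding_side': 'right'}
print(new_behaviour)  # {'model_max_length': 512, 'padding_side': 'right', 'do_lower_case': False}
```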
@@ -621,6 +621,9 @@ class XLMTokenizer(PreTrainedTokenizer):
             cls_token=cls_token,
             mask_token=mask_token,
             additional_special_tokens=additional_special_tokens,
+            lang2id=lang2id,
+            id2lang=id2lang,
+            do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
             **kwargs,
         )
...
@@ -123,9 +123,10 @@ class XLMProphetNetTokenizer(PreTrainedTokenizer):
         super().__init__(
             bos_token=bos_token,
             eos_token=eos_token,
+            unk_token=unk_token,
             sep_token=sep_token,
-            unk_token=unk_token,
             pad_token=pad_token,
+            cls_token=cls_token,
             mask_token=mask_token,
             **kwargs,
         )
...
@@ -128,6 +128,9 @@ class XLNetTokenizer(PreTrainedTokenizer):
         **kwargs
     ):
         super().__init__(
+            do_lower_case=do_lower_case,
+            remove_space=remove_space,
+            keep_accents=keep_accents,
             bos_token=bos_token,
             eos_token=eos_token,
             unk_token=unk_token,
...
@@ -177,6 +177,25 @@ class TokenizerTesterMixin:
         self.assertIn("tokenizer_file", signature.parameters)
         self.assertIsNone(signature.parameters["tokenizer_file"].default)

+    def test_tokenizer_slow_store_full_signature(self):
+        signature = inspect.signature(self.tokenizer_class.__init__)
+        tokenizer = self.get_tokenizer()
+
+        for parameter_name, parameter in signature.parameters.items():
+            if parameter.default != inspect.Parameter.empty:
+                self.assertIn(parameter_name, tokenizer.init_kwargs)
+
+    def test_tokenizer_fast_store_full_signature(self):
+        if not self.test_rust_tokenizer:
+            return
+
+        signature = inspect.signature(self.rust_tokenizer_class.__init__)
+        tokenizer = self.get_rust_tokenizer()
+
+        for parameter_name, parameter in signature.parameters.items():
+            if parameter.default != inspect.Parameter.empty:
+                self.assertIn(parameter_name, tokenizer.init_kwargs)
+
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
             return
...
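The two tests added above assert that every defaulted parameter of a tokenizer's `__init__` signature reappears in `tokenizer.init_kwargs` after construction. A toy illustration of that assertion, using a hypothetical `ToyTokenizer` so it runs outside the transformers test harness:

```python
# Toy illustration of the assertion made by the new tests; ToyTokenizer is hypothetical.
import inspect


class ToyTokenizer:
    def __init__(self, do_lower_case=True, strip_accents=None, **kwargs):
        # Record everything, mirroring what the forwarding added throughout this diff achieves.
        self.init_kwargs = {"do_lower_case": do_lower_case, "strip_accents": strip_accents, **kwargs}


tokenizer = ToyTokenizer()
signature = inspect.signature(ToyTokenizer.__init__)
for parameter_name, parameter in signature.parameters.items():
    if parameter.default != inspect.Parameter.empty:
        assert parameter_name in tokenizer.init_kwargs, parameter_name
print("all defaulted __init__ parameters are recorded in init_kwargs")
```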