Unverified commit 5787e4c1, authored by Anthony MOI and committed by GitHub

Various tokenizers fixes (#5558)

* BertTokenizerFast - Do not specify strip_accents by default

* Bump tokenizers to new version

* Add test for AddedToken serialization
parent 21f28c34
@@ -114,7 +114,7 @@ setup(
     packages=find_packages("src"),
     install_requires=[
         "numpy",
-        "tokenizers == 0.8.0-rc4",
+        "tokenizers == 0.8.1.rc1",
         # dataclasses for Python versions that don't have it
         "dataclasses;python_version<'3.7'",
         # utilities from PyPA to e.g. compare versions
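To sanity-check the dependency bump locally, a minimal sketch (assuming the tokenizers package has been installed from the pin above):

import tokenizers

# The installed backend version should correspond to the new "tokenizers == 0.8.1.rc1" pin.
print(tokenizers.__version__)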
@@ -606,7 +606,7 @@ class BertTokenizerFast(PreTrainedTokenizerFast):
         mask_token="[MASK]",
         clean_text=True,
         tokenize_chinese_chars=True,
-        strip_accents=True,
+        strip_accents=None,
         wordpieces_prefix="##",
         **kwargs
     ):
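A minimal sketch of the intended effect of this change (the checkpoint names are the standard BERT checkpoints, used only for illustration): with strip_accents left as None, accent stripping is deferred to the Rust BertNormalizer, which by convention follows the lowercase setting, as in the original BERT.

from transformers import BertTokenizerFast

# strip_accents is no longer forced to True; it follows do_lower_case by default.
cased = BertTokenizerFast.from_pretrained("bert-base-cased")      # do_lower_case=False
uncased = BertTokenizerFast.from_pretrained("bert-base-uncased")  # do_lower_case=True

print(cased.tokenize("naïve"))    # accents kept, e.g. ['na', '##ï', '##ve']
print(uncased.tokenize("naïve"))  # accents stripped, e.g. ['naive']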
@@ -24,6 +24,7 @@ from typing import TYPE_CHECKING, Dict, List, Tuple, Union

 from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
 from transformers.testing_utils import require_tf, require_torch, slow
+from transformers.tokenization_utils import AddedToken


 if TYPE_CHECKING:
@@ -233,6 +234,12 @@ class TokenizerTesterMixin:
         self.assertListEqual(subwords, subwords_loaded)

+    def test_pickle_added_tokens(self):
+        tok1 = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True)
+        tok2 = pickle.loads(pickle.dumps(tok1))
+
+        self.assertEqual(tok1.__getstate__(), tok2.__getstate__())
+
     def test_added_tokens_do_lower_case(self):
         # TODO(thom) activate fast tokenizer tests once Rust tokenizers accepts white spaces in added tokens
         tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
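The new test covers AddedToken serialization; the same round-trip can be reproduced standalone, roughly:

import pickle

from transformers.tokenization_utils import AddedToken

tok = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True)
restored = pickle.loads(pickle.dumps(tok))

# The token's options (lstrip/rstrip/normalized/single_word) should survive serialization unchanged.
assert tok.__getstate__() == restored.__getstate__()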
@@ -91,8 +91,6 @@ class CommonFastTokenizerTest(unittest.TestCase):
         self.assert_padding(tokenizer_r, tokenizer_p)
         self.assert_create_token_type_ids(tokenizer_r, tokenizer_p)
         self.assert_prepare_for_model(tokenizer_r, tokenizer_p)
-        # TODO: enable for v3.0.0
-        # self.assert_empty_output_no_special_tokens(tokenizer_r, tokenizer_p)

     def fast_only(self, tokenizer_r):
         # Ensure None raise an error
@@ -748,29 +746,41 @@ class WordPieceFastTokenizerTest(CommonFastTokenizerTest):
             add_special_tokens=True,
         )

-        expected_results = [
-            ((0, 1), "A"),
-            ((1, 2), ","),
-            ((3, 8), "naive"),  # BERT normalizes this away
-            # Append MASK here after lower-casing
-            ((16, 21), "Allen"),
-            ((22, 24), "##NL"),
-            ((24, 25), "##P"),
-            ((26, 34), "sentence"),
-            ((35, 36), "."),
-        ]
-
-        # Check if the tokenizer is uncased
-        if tokenizer_r.init_kwargs.get("do_lower_case"):
-            expected_results = [(offset, token.lower()) for (offset, token) in expected_results]
-
-        # Append the special tokens
-        expected_results.insert(3, ((9, 15), "[MASK]"))
-        expected_results.insert(0, (None, "[CLS]"))
-        expected_results.append((None, "[SEP]"))
+        do_lower_case = tokenizer_r.init_kwargs.get("do_lower_case")
+        expected_results = (
+            [
+                ((0, 0), "[CLS]"),
+                ((0, 1), "A"),
+                ((1, 2), ","),
+                ((3, 5), "na"),
+                ((5, 6), "##ï"),
+                ((6, 8), "##ve"),
+                ((9, 15), "[MASK]"),
+                ((16, 21), "Allen"),
+                ((21, 23), "##NL"),
+                ((23, 24), "##P"),
+                ((25, 33), "sentence"),
+                ((33, 34), "."),
+                ((0, 0), "[SEP]"),
+            ]
+            if not do_lower_case
+            else [
+                ((0, 0), "[CLS]"),
+                ((0, 1), "a"),
+                ((1, 2), ","),
+                ((3, 8), "naive"),
+                ((9, 15), "[MASK]"),
+                ((16, 21), "allen"),
+                ((21, 23), "##nl"),
+                ((23, 24), "##p"),
+                ((25, 33), "sentence"),
+                ((33, 34), "."),
+                ((0, 0), "[SEP]"),
+            ]
+        )

         self.assertEqual([e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
-        # self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
+        self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])


 class RobertaFastTokenizerTest(CommonFastTokenizerTest):
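The re-enabled offset assertion relies on fast tokenizers returning character spans for each token; a minimal usage sketch (checkpoint name for illustration only):

from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
enc = tokenizer("A, naïve [MASK] AllenNLP sentence.", return_offsets_mapping=True)

# Each token carries a (start, end) character span; special tokens map to (0, 0).
for span, token in zip(enc["offset_mapping"], tokenizer.convert_ids_to_tokens(enc["input_ids"])):
    print(span, token)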