Unverified commit 5787e4c1 authored by Anthony MOI, committed by GitHub

Various tokenizers fixes (#5558)

* BertTokenizerFast - Do not specify strip_accents by default

* Bump tokenizers to new version

* Add test for AddedToken serialization
parent 21f28c34
@@ -114,7 +114,7 @@ setup(
     packages=find_packages("src"),
     install_requires=[
         "numpy",
-        "tokenizers == 0.8.0-rc4",
+        "tokenizers == 0.8.1.rc1",
         # dataclasses for Python versions that don't have it
         "dataclasses;python_version<'3.7'",
         # utilities from PyPA to e.g. compare versions
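The dependency pin moves from the 0.8.0 release candidate to 0.8.1.rc1 of the Rust-backed tokenizers package. A quick way to confirm which backend version is actually installed (a minimal sketch, assuming both packages are importable in the current environment):

import tokenizers
import transformers

# Both packages expose a version string; after this change transformers
# expects the tokenizers version pinned above.
print(transformers.__version__)
print(tokenizers.__version__)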
@@ -606,7 +606,7 @@ class BertTokenizerFast(PreTrainedTokenizerFast):
         mask_token="[MASK]",
         clean_text=True,
         tokenize_chinese_chars=True,
-        strip_accents=True,
+        strip_accents=None,
         wordpieces_prefix="##",
         **kwargs
     ):
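Leaving strip_accents unset (None) means accent stripping is no longer forced on: the backend BERT normalizer derives it from the lowercasing option, as in the original BERT. A minimal sketch of the difference (assuming the bert-base-cased checkpoint can be loaded, e.g. from the model hub):

from transformers import BertTokenizerFast

# New default: strip_accents=None, so a cased model keeps accents,
# e.g. "naïve" should produce subwords containing "##ï" (see the offsets test below).
cased = BertTokenizerFast.from_pretrained("bert-base-cased")
print(cased.tokenize("naïve"))

# Passing strip_accents=True restores the previously forced behaviour.
forced = BertTokenizerFast.from_pretrained("bert-base-cased", strip_accents=True)
print(forced.tokenize("naïve"))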
@@ -24,6 +24,7 @@ from typing import TYPE_CHECKING, Dict, List, Tuple, Union
 from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
 from transformers.testing_utils import require_tf, require_torch, slow
+from transformers.tokenization_utils import AddedToken
 
 if TYPE_CHECKING:
@@ -233,6 +234,12 @@ class TokenizerTesterMixin:
         self.assertListEqual(subwords, subwords_loaded)
 
+    def test_pickle_added_tokens(self):
+        tok1 = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True)
+        tok2 = pickle.loads(pickle.dumps(tok1))
+
+        self.assertEqual(tok1.__getstate__(), tok2.__getstate__())
+
     def test_added_tokens_do_lower_case(self):
         # TODO(thom) activate fast tokenizer tests once Rust tokenizers accepts white spaces in added tokens
         tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
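The new test_pickle_added_tokens test checks that an AddedToken keeps its options (lstrip, rstrip, normalized, single_word) across pickling, which tokenizer serialization relies on. The same round trip as a standalone snippet (a sketch mirroring the test above):

import pickle

from transformers.tokenization_utils import AddedToken

# An added token with non-default options, as in the test.
token = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True)

# Pickle and restore, then compare the internal state dicts.
restored = pickle.loads(pickle.dumps(token))
assert token.__getstate__() == restored.__getstate__()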
@@ -91,8 +91,6 @@ class CommonFastTokenizerTest(unittest.TestCase):
         self.assert_padding(tokenizer_r, tokenizer_p)
         self.assert_create_token_type_ids(tokenizer_r, tokenizer_p)
         self.assert_prepare_for_model(tokenizer_r, tokenizer_p)
-        # TODO: enable for v3.0.0
-        # self.assert_empty_output_no_special_tokens(tokenizer_r, tokenizer_p)
 
     def fast_only(self, tokenizer_r):
         # Ensure None raise an error
@@ -748,29 +746,41 @@ class WordPieceFastTokenizerTest(CommonFastTokenizerTest):
             add_special_tokens=True,
         )
 
-        expected_results = [
+        do_lower_case = tokenizer_r.init_kwargs.get("do_lower_case")
+        expected_results = (
+            [
+                ((0, 0), "[CLS]"),
                 ((0, 1), "A"),
                 ((1, 2), ","),
-            ((3, 8), "naive"),  # BERT normalizes this away
-            # Append MASK here after lower-casing
+                ((3, 5), "na"),
+                ((5, 6), "##ï"),
+                ((6, 8), "##ve"),
+                ((9, 15), "[MASK]"),
                 ((16, 21), "Allen"),
-            ((22, 24), "##NL"),
-            ((24, 25), "##P"),
-            ((26, 34), "sentence"),
-            ((35, 36), "."),
+                ((21, 23), "##NL"),
+                ((23, 24), "##P"),
+                ((25, 33), "sentence"),
+                ((33, 34), "."),
+                ((0, 0), "[SEP]"),
             ]
-
-        # Check if the tokenizer is uncased
-        if tokenizer_r.init_kwargs.get("do_lower_case"):
-            expected_results = [(offset, token.lower()) for (offset, token) in expected_results]
-
-        # Append the special tokens
-        expected_results.insert(3, ((9, 15), "[MASK]"))
-        expected_results.insert(0, (None, "[CLS]"))
-        expected_results.append((None, "[SEP]"))
+            if not do_lower_case
+            else [
+                ((0, 0), "[CLS]"),
+                ((0, 1), "a"),
+                ((1, 2), ","),
+                ((3, 8), "naive"),
+                ((9, 15), "[MASK]"),
+                ((16, 21), "allen"),
+                ((21, 23), "##nl"),
+                ((23, 24), "##p"),
+                ((25, 33), "sentence"),
+                ((33, 34), "."),
+                ((0, 0), "[SEP]"),
+            ]
+        )
 
         self.assertEqual([e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
-        # self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
+        self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
 
 
 class RobertaFastTokenizerTest(CommonFastTokenizerTest):
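The previously commented-out offset assertion is enabled now that the reported character spans line up with the original, un-normalized string (note the corrected offsets for "##NL", "##P", "sentence" and "." above). A minimal sketch of what the test exercises, assuming the test sentence reconstructed from the expected offsets and an uncased checkpoint such as bert-base-uncased:

from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
sentence = "A, naïve [MASK] AllenNLP sentence."

# offset_mapping is only returned by the fast (Rust-backed) tokenizers.
encoding = tokenizer.encode_plus(
    sentence,
    return_offsets_mapping=True,
    add_special_tokens=True,
)

# Each token comes with the (start, end) character span it was taken from;
# special tokens such as [CLS] and [SEP] get the empty span (0, 0).
tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"])
for (start, end), token in zip(encoding["offset_mapping"], tokens):
    print((start, end), token, repr(sentence[start:end]))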