Unverified Commit 182b8374 authored by Tanay Mehta, committed by GitHub

Add Number Normalisation for SpeechT5 (#25447)

* add: NumberNormalizer works for integers, floats, common currencies, negative numbers and percentages

* fix: renamed number normalizer class and added normalization to SpeechT5Processor

* fix: restyled with black and ruff, should pass code quality tests

* fix: moved normalization to tokenizer and other small changes to normalizer

* add: test for normalization and changed the existing full tokenizer test

* fix: tokenization tests now pass, made changes to existing tokenization where normalization is covered; added normalize arg to func signature

* fix: changed default normalize setting to False, modified the tests a bit

* fix: added support for comma separated numbers, tokenization on the fly with kwargs and normalizer getter/setter funcs (see the usage sketch below)
parent 58c36bea
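The net effect of the commit is a `normalize` flag on `SpeechT5Tokenizer`. A minimal usage sketch, assuming the `microsoft/speecht5_tts` checkpoint for illustration (any checkpoint shipping this tokenizer should behave the same); the expected output mirrors the updated `test_full_tokenizer` below:

# Opt into number normalization when loading the tokenizer
# ("microsoft/speecht5_tts" is an illustrative checkpoint name).
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts", normalize=True)
ids = tokenizer("I was born in 92000")["input_ids"]
print(tokenizer.decode(ids, skip_special_tokens=True))
# per the updated tests: "I was born in ninety two thousand"

The commit adds the new file `src/transformers/models/speecht5/number_normalizer.py` (reproduced below) and threads the `normalize` option through the tokenizer and its tests: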
# coding=utf-8
# Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Number Normalizer class for SpeechT5."""
import re
class EnglishNumberNormalizer:
def __init__(self):
self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
self.teens = [
"",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
self.tens = ["", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
self.thousands = [
"",
"thousand",
"million",
"billion",
"trillion",
"quadrillion",
"quintillion",
"sextillion",
"septillion",
"octillion",
"nonillion",
"decillion",
]
# Define a dictionary to map currency symbols to their names
# Top most traded currencies according to
# https://en.wikipedia.org/wiki/Template:Most_traded_currencies
self.currency_symbols = {
"$": " dollars",
"€": " euros",
"£": " pounds",
"¢": " cents",
"¥": " japanese yen",
"﷼": " saudi riyal",
"₹": " indian rupees",
"₽": " russian rubles",
"฿": " thai baht",
"₺": " turkish liras",
"₴": " ukrainian hryvnia",
"₣": " swiss francs",
"₡": " costa rican colon",
"₱": " philippine peso",
"₪": " israeli shekels",
"₮": " mongolian tögrög",
"₩": " south korean won",
"₦": " nigerian naira",
"₫": " vietnamese Đồng",
}
    def spell_number(self, num):
        if num == 0:
            return "zero"

        parts = []
        # Spell out the number three digits (one "thousands" group) at a time
        for i in range(0, len(self.thousands)):
            if num % 1000 != 0:
                part = ""
                hundreds = num % 1000 // 100
                tens_units = num % 100

                if hundreds > 0:
                    part += self.ones[hundreds] + " hundred"
                    if tens_units > 0:
                        part += " and "

                if tens_units > 10 and tens_units < 20:
                    part += self.teens[tens_units - 10]
                else:
                    tens_digit = self.tens[tens_units // 10]
                    ones_digit = self.ones[tens_units % 10]
                    if tens_digit:
                        part += tens_digit
                    if ones_digit:
                        if tens_digit:
                            part += " "
                        part += ones_digit

                parts.append(part)

            num //= 1000

        return " ".join(reversed(parts))
    def convert(self, number):
        """
        Converts an individual number passed in string form to spelt-out form
        """
        if "." in number:
            integer_part, decimal_part = number.split(".")
        else:
            integer_part, decimal_part = number, "00"

        # Extract currency symbol if present
        currency_symbol = ""
        for symbol, name in self.currency_symbols.items():
            if integer_part.startswith(symbol):
                currency_symbol = name
                integer_part = integer_part[len(symbol) :]
                break

            if integer_part.startswith("-"):
                if integer_part[1:].startswith(symbol):
                    currency_symbol = name
                    integer_part = "-" + integer_part[len(symbol) + 1 :]
                    break

        # Extract 'minus' prefix for negative numbers
        minus_prefix = ""
        if integer_part.startswith("-"):
            minus_prefix = "minus "
            integer_part = integer_part[1:]
        elif integer_part.startswith("minus"):
            minus_prefix = "minus "
            integer_part = integer_part[len("minus") :]

        percent_suffix = ""
        if "%" in integer_part or "%" in decimal_part:
            percent_suffix = " percent"
            integer_part = integer_part.replace("%", "")
            decimal_part = decimal_part.replace("%", "")

        integer_part = integer_part.zfill(3 * ((len(integer_part) - 1) // 3 + 1))

        parts = []
        for i in range(0, len(integer_part), 3):
            chunk = int(integer_part[i : i + 3])
            if chunk > 0:
                part = self.spell_number(chunk)
                unit = self.thousands[len(integer_part[i:]) // 3 - 1]
                if unit:
                    part += " " + unit
                parts.append(part)

        spelled_integer = " ".join(parts)

        # Format the spelt-out number based on conditions, such as:
        # If it has decimal parts, currency symbol, minus prefix, etc
        if decimal_part == "00":
            return (
                f"{minus_prefix}{spelled_integer}{percent_suffix}{currency_symbol}"
                if minus_prefix or currency_symbol
                else f"{spelled_integer}{percent_suffix}"
            )
        else:
            spelled_decimal = " ".join([self.spell_number(int(digit)) for digit in decimal_part])
            return (
                f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}{currency_symbol}"
                if minus_prefix or currency_symbol
                else f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}"
            )
    def __call__(self, text):
        """
        Convert numbers / number-like quantities in a string to their spelt-out counterparts
        """
        # Form part of the pattern for all currency symbols
        pattern = r"(?<!\w)(-?\$?\€?\£?\¢?\¥?\₹?\₽?\฿?\₺?\₴?\₣?\₡?\₱?\₪?\₮?\₩?\₦?\₫?\﷼?\d+(?:\.\d{1,2})?%?)(?!\w)"

        # Find and replace commas in numbers (15,000 -> 15000, etc)
        text = re.sub(r"(\d+,\d+)", lambda match: match.group(1).replace(",", ""), text)

        # Use regex to find and replace numbers in the text
        converted_text = re.sub(pattern, lambda match: self.convert(match.group(1)), text)
        converted_text = re.sub(" +", " ", converted_text)

        return converted_text
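As a quick sanity check, the normalizer can be exercised on its own. A minimal sketch; the import path is an assumption for a local copy of the file above, and the expected strings are taken from the test fixture added further down in this commit:

# Standalone demo of EnglishNumberNormalizer (assumed local import path);
# expected outputs come from get_numeric_input_output_texts() in the tests below.
from number_normalizer import EnglishNumberNormalizer

normalizer = EnglishNumberNormalizer()
print(normalizer("I have $123.45"))  # I have one hundred and twenty three point four five dollars
print(normalizer("-₴876.90"))        # minus eight hundred and seventy six point nine zero ukrainian hryvnia
print(normalizer("73%"))             # seventy three percent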
--- a/src/transformers/models/speecht5/tokenization_speecht5.py
+++ b/src/transformers/models/speecht5/tokenization_speecht5.py
@@ -23,6 +23,7 @@ import sentencepiece as spm
 from ...tokenization_utils import PreTrainedTokenizer
 from ...utils import logging
+from .number_normalizer import EnglishNumberNormalizer

 logger = logging.get_logger(__name__)
@@ -64,6 +65,8 @@ class SpeechT5Tokenizer(PreTrainedTokenizer):
             token instead.
         pad_token (`str`, *optional*, defaults to `"<pad>"`):
             The token used for padding, for example when batching sequences of different lengths.
+        normalize (`bool`, *optional*, defaults to `False`):
+            Whether to convert numeric quantities in the text to their spelt-out English counterparts.
         sp_model_kwargs (`dict`, *optional*):
             Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
             SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
@@ -97,6 +100,7 @@ class SpeechT5Tokenizer(PreTrainedTokenizer):
         eos_token="</s>",
         unk_token="<unk>",
         pad_token="<pad>",
+        normalize=False,
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs,
     ) -> None:
@@ -107,19 +111,40 @@ class SpeechT5Tokenizer(PreTrainedTokenizer):
             eos_token=eos_token,
             unk_token=unk_token,
             pad_token=pad_token,
+            normalize=normalize,
             sp_model_kwargs=self.sp_model_kwargs,
             **kwargs,
         )

         self.vocab_file = vocab_file
+        self.normalize = normalize
+        self._normalizer = None

         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.Load(vocab_file)

+    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+        normalize = kwargs.pop("normalize", self.normalize)
+        if is_split_into_words:
+            text = " " + text
+        if normalize:
+            text = self.normalizer(text)
+        return (text, kwargs)
+
     @property
     def vocab_size(self):
         return self.sp_model.get_piece_size()

+    @property
+    def normalizer(self):
+        if self._normalizer is None:
+            self._normalizer = EnglishNumberNormalizer()
+        return self._normalizer
+
+    @normalizer.setter
+    def normalizer(self, value):
+        self._normalizer = value
+
     def get_vocab(self):
         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
         vocab.update(self.added_tokens_encoder)
...
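Since `prepare_for_tokenization` pops a `normalize` kwarg (falling back to the constructor default), normalization can also be requested per call, and the `normalizer` setter lets callers substitute their own callable. A sketch of both, reusing the `tokenizer` from the earlier example; `IdentityNormalizer` is a hypothetical stand-in, not part of the commit:

# Per-call override: normalize this one input even if the tokenizer
# was constructed with normalize=False.
ids = tokenizer("I have 15,000 dollars", normalize=True)["input_ids"]

# Swap in a custom normalizer via the setter added above
# (IdentityNormalizer is hypothetical, shown only for illustration).
class IdentityNormalizer:
    def __call__(self, text):
        return text

tokenizer.normalizer = IdentityNormalizer()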
--- a/tests/models/speecht5/test_tokenization_speecht5.py
+++ b/tests/models/speecht5/test_tokenization_speecht5.py
@@ -52,12 +52,24 @@ class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
         output_text = "this is a test"
         return input_text, output_text

+    def get_numeric_input_output_texts(self):
+        input_text = "I have $123.45 and owe €59.78. My balance is -₴876.90 and have 73% stocks in my company which equals to ₦72649201"
+        output_text = "I have one hundred and twenty three point four five dollars and owe fifty nine point seven eight euros. My balance is minus eight hundred and seventy six point nine zero ukrainian hryvnia and have seventy three percent stocks in my company which equals to seventy two million six hundred and forty nine thousand two hundred and one nigerian naira"
+        return input_text, output_text
+
     def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
         input_text, output_text = self.get_input_output_texts(tokenizer)
         ids = tokenizer.encode(output_text, add_special_tokens=False)
         text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
         return text, ids

+    def test_tokenizer_normalization(self):
+        tokenizer = self.get_tokenizer(normalize=True)
+        input_text, expected_text = self.get_numeric_input_output_texts()
+        input_ids = tokenizer.encode(input_text)
+        output_text = tokenizer.decode(input_ids, skip_special_tokens=True)
+        self.assertEqual(output_text, expected_text)
+
     def test_convert_token_and_id(self):
         """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
         token = "<pad>"
@@ -137,7 +149,7 @@ class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
         pass

     def test_full_tokenizer(self):
-        tokenizer = self.get_tokenizer()
+        tokenizer = self.get_tokenizer(normalize=True)
         tokens = tokenizer.tokenize("This is a test")

         # fmt: off
@@ -153,20 +165,20 @@ class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(
             tokens,
             # fmt: off
-            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']
+            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']
             # fmt: on
         )
         ids = tokenizer.convert_tokens_to_ids(tokens)
         # fmt: off
-        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
+        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 9, 10, 9, 5, 6, 22, 4, 6, 20, 8, 4, 6, 11, 8, 16, 12, 7, 9, 14, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
         # fmt: on

         back_tokens = tokenizer.convert_ids_to_tokens(ids)
         self.assertListEqual(
             back_tokens,
             # fmt: off
-            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']
+            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']
             # fmt: on
         )
...