# grad_tts_utils.py: tokenizer utilities for Grad-TTS (text cleaning, CMUdict lookup, symbol-to-ID encoding)

import os
import re
from shutil import copyfile

import torch

from transformers import PreTrainedTokenizer


try:
    from unidecode import unidecode
except ImportError:
    print("unidecode is not installed")

try:
    import inflect
except ImportError:
    print("inflect is not installed")


valid_symbols = [
    "AA",
    "AA0",
    "AA1",
    "AA2",
    "AE",
    "AE0",
    "AE1",
    "AE2",
    "AH",
    "AH0",
    "AH1",
    "AH2",
    "AO",
    "AO0",
    "AO1",
    "AO2",
    "AW",
    "AW0",
    "AW1",
    "AW2",
    "AY",
    "AY0",
    "AY1",
    "AY2",
    "B",
    "CH",
    "D",
    "DH",
    "EH",
    "EH0",
    "EH1",
    "EH2",
    "ER",
    "ER0",
    "ER1",
    "ER2",
    "EY",
    "EY0",
    "EY1",
    "EY2",
    "F",
    "G",
    "HH",
    "IH",
    "IH0",
    "IH1",
    "IH2",
    "IY",
    "IY0",
    "IY1",
    "IY2",
    "JH",
    "K",
    "L",
    "M",
    "N",
    "NG",
    "OW",
    "OW0",
    "OW1",
    "OW2",
    "OY",
    "OY0",
    "OY1",
    "OY2",
    "P",
    "R",
    "S",
    "SH",
    "T",
    "TH",
    "UH",
    "UH0",
    "UH1",
    "UH2",
    "UW",
    "UW0",
    "UW1",
    "UW2",
    "V",
    "W",
    "Y",
    "Z",
    "ZH",
]

_valid_symbol_set = set(valid_symbols)


def intersperse(lst, item):
    """Insert `item` between and around the elements of `lst` (used to add blank symbols)."""
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result
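# Illustrative sketch: intersperse([3, 7, 2], 0) returns [0, 3, 0, 7, 0, 2, 0],
# which is how the tokenizer below places a blank token around every symbol ID.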


class CMUDict:
    """Thin wrapper around CMUdict: maps a word to a list of ARPAbet pronunciations."""

    def __init__(self, file_or_path, keep_ambiguous=True):
        if isinstance(file_or_path, str):
            with open(file_or_path, encoding="latin-1") as f:
                entries = _parse_cmudict(f)
        else:
            entries = _parse_cmudict(file_or_path)
        if not keep_ambiguous:
            entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
        self._entries = entries

    def __len__(self):
        return len(self._entries)

    def lookup(self, word):
        return self._entries.get(word.upper())
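# Illustrative usage (assumes a local CMUdict file; "cmu_dictionary" is a placeholder path,
# and the pronunciations returned depend on that file's contents):
#   cmu = CMUDict("cmu_dictionary")
#   cmu.lookup("hello")  # -> e.g. ["HH AH0 L OW1"]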


_alt_re = re.compile(r"\([0-9]+\)")


def _parse_cmudict(file):
    cmudict = {}
    for line in file:
        if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"):
            parts = line.split("  ")
            word = re.sub(_alt_re, "", parts[0])
            pronunciation = _get_pronunciation(parts[1])
            if pronunciation:
                if word in cmudict:
                    cmudict[word].append(pronunciation)
                else:
                    cmudict[word] = [pronunciation]
    return cmudict
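# A CMUdict line looks like "HELLO  HH AH0 L OW1" (word, two spaces, space-separated ARPAbet
# phonemes); alternate pronunciations carry a "(1)", "(2)", ... suffix on the word, which
# _alt_re strips so they group under the same key.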


def _get_pronunciation(s):
    parts = s.strip().split(" ")
    for part in parts:
        if part not in _valid_symbol_set:
            return None
    return " ".join(parts)


_whitespace_re = re.compile(r"\s+")

_abbreviations = [
    (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
    for x in [
        ("mrs", "misess"),
        ("mr", "mister"),
        ("dr", "doctor"),
        ("st", "saint"),
        ("co", "company"),
        ("jr", "junior"),
        ("maj", "major"),
        ("gen", "general"),
        ("drs", "doctors"),
        ("rev", "reverend"),
        ("lt", "lieutenant"),
        ("hon", "honorable"),
        ("sgt", "sergeant"),
        ("capt", "captain"),
        ("esq", "esquire"),
        ("ltd", "limited"),
        ("col", "colonel"),
        ("ft", "fort"),
    ]
]


def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def expand_numbers(text):
    return normalize_numbers(text)


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, " ", text)


def convert_to_ascii(text):
    return unidecode(text)


def basic_cleaners(text):
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def english_cleaners(text):
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_numbers(text)
    text = expand_abbreviations(text)
    text = collapse_whitespace(text)
    return text
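# Rough example of the full cleaning pipeline (the exact wording of number expansion comes
# from inflect and may differ slightly):
#   english_cleaners("Dr. Smith paid $3.50 in 1995.")
#   # -> "doctor smith paid three dollars, fifty cents in nineteen ninety-five."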


_inflect = inflect.engine()
_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)")
_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)")
_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)")
_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)")
_number_re = re.compile(r"[0-9]+")


def _remove_commas(m):
    return m.group(1).replace(",", "")


def _expand_decimal_point(m):
    return m.group(1).replace(".", " point ")


def _expand_dollars(m):
    match = m.group(1)
    parts = match.split(".")
    if len(parts) > 2:
        return match + " dollars"
    dollars = int(parts[0]) if parts[0] else 0
    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
    if dollars and cents:
        dollar_unit = "dollar" if dollars == 1 else "dollars"
        cent_unit = "cent" if cents == 1 else "cents"
        return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
    elif dollars:
        dollar_unit = "dollar" if dollars == 1 else "dollars"
        return "%s %s" % (dollars, dollar_unit)
    elif cents:
        cent_unit = "cent" if cents == 1 else "cents"
        return "%s %s" % (cents, cent_unit)
    else:
        return "zero dollars"


def _expand_ordinal(m):
    return _inflect.number_to_words(m.group(0))


def _expand_number(m):
    num = int(m.group(0))
    if num > 1000 and num < 3000:
        if num == 2000:
            return "two thousand"
        elif num > 2000 and num < 2010:
            return "two thousand " + _inflect.number_to_words(num % 100)
        elif num % 100 == 0:
            return _inflect.number_to_words(num // 100) + " hundred"
        else:
            return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ")
    else:
        return _inflect.number_to_words(num, andword="")


def normalize_numbers(text):
    text = re.sub(_comma_number_re, _remove_commas, text)
    text = re.sub(_pounds_re, r"\1 pounds", text)
    text = re.sub(_dollars_re, _expand_dollars, text)
    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
    text = re.sub(_ordinal_re, _expand_ordinal, text)
    text = re.sub(_number_re, _expand_number, text)
    return text
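# Illustrative examples (output wording comes from inflect):
#   normalize_numbers("£100")  # -> "one hundred pounds"
#   normalize_numbers("2nd")   # -> "second"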


""" from https://github.com/keithito/tacotron """


_pad = "_"
_punctuation = "!'(),.:;? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

# Prepend "@" to ARPAbet symbols to ensure uniqueness:
_arpabet = ["@" + s for s in valid_symbols]

# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
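# Resulting ID layout: index 0 is the pad symbol "_", followed by "-", the punctuation marks,
# upper- and lowercase letters, and finally the "@"-prefixed ARPAbet symbols.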


_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")


def get_arpabet(word, dictionary):
    word_arpabet = dictionary.lookup(word)
    if word_arpabet is not None:
        return "{" + word_arpabet[0] + "}"
    else:
        return word


def text_to_sequence(text, cleaner_names=[english_cleaners], dictionary=None):
    """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded
    in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through
      dictionary: arpabet class with arpabet dictionary

    Returns:
      List of integers corresponding to the symbols in the text
    """
    sequence = []
    space = _symbols_to_sequence(" ")
    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            clean_text = _clean_text(text, cleaner_names)
            if dictionary is not None:
                clean_text = [get_arpabet(w, dictionary) for w in clean_text.split(" ")]
                for i in range(len(clean_text)):
                    t = clean_text[i]
                    if t.startswith("{"):
                        sequence += _arpabet_to_sequence(t[1:-1])
                    else:
                        sequence += _symbols_to_sequence(t)
                    sequence += space
            else:
                sequence += _symbols_to_sequence(clean_text)
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # remove trailing space
    if dictionary is not None:
        sequence = sequence[:-1] if sequence[-1] == space[0] else sequence
    return sequence


def sequence_to_text(sequence):
    """Converts a sequence of IDs back to a string"""
    result = ""
    for symbol_id in sequence:
        if symbol_id in _id_to_symbol:
            s = _id_to_symbol[symbol_id]
            # Enclose ARPAbet back in curly braces:
            if len(s) > 1 and s[0] == "@":
                s = "{%s}" % s[1:]
            result += s
    return result.replace("}{", " ")
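# Roundtrip sketch (illustrative): with dictionary=None, sequence_to_text(text_to_sequence(t))
# returns the cleaned text; with a CMUDict passed, dictionary words come back as ARPAbet
# blocks such as "{HH AH0 L OW1}" separated by spaces.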


def _clean_text(text, cleaner_names):
    for cleaner in cleaner_names:
        text = cleaner(text)
    return text


def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]


def _arpabet_to_sequence(text):
    return _symbols_to_sequence(["@" + s for s in text.split()])


def _should_keep_symbol(s):
    return s in _symbol_to_id and s != "_" and s != "~"


VOCAB_FILES_NAMES = {
    "dict_file": "dict_file.txt",
}


class GradTTSTokenizer(PreTrainedTokenizer):
    """Tokenizer that converts text to Grad-TTS symbol IDs with blank tokens interspersed."""

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(self, dict_file, **kwargs):
        super().__init__(**kwargs)
        self.cmu = CMUDict(dict_file)
        self.dict_file = dict_file

    def __call__(self, text):
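        # Encode the text as symbol IDs, intersperse a blank token (id = len(symbols)) between
        # them, and add a batch dimension; x_lengths holds the resulting sequence length.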
        x = torch.LongTensor(intersperse(text_to_sequence(text, dictionary=self.cmu), len(symbols)))[None]
        x_lengths = torch.LongTensor([x.shape[-1]])
        return x, x_lengths

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        dict_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["dict_file"]
        )

        copyfile(self.dict_file, dict_file)

        return (dict_file,)
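

# Minimal usage sketch (illustrative; "cmu_dictionary" is a placeholder path to a CMUdict file):
#   tokenizer = GradTTSTokenizer("cmu_dictionary")
#   x, x_lengths = tokenizer("Hello world")  # x: LongTensor of shape (1, seq_len)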