"""Detokenizers for PTB-, WikiText-, and LAMBADA-style text."""
import re

def ptb_detokenizer(string):
	"""Undo Penn Treebank-style tokenization (detached clitics, masked numbers)."""
	string = string.replace(" '", "'")
	string = string.replace(" \n", "\n")
	string = string.replace("\n ", "\n")
	string = string.replace(" n't", "n't")
	# PTB masks numbers as "N"; substitute a placeholder digit so "$ N" -> "$1"
	string = string.replace(" N ", " 1 ")
	string = string.replace("$ 1", "$1")
	string = string.replace("# 1", "#1")
	return string
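
# Usage sketch (the input below is illustrative, not taken from any dataset file):
#   ptb_detokenizer("they do n't owe $ N to the bank \n")
#   -> "they don't owe $1 to the bank\n"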


def wikitext_detokenizer(string):
	"""Undo the tokenization used in the raw WikiText data."""
	# contractions
	string = string.replace("s '", "s'")
	# rejoin an apostrophe with a following digit, e.g. "' 90s" -> "'90s"
	string = re.sub(r"' ([0-9])", r"'\1", string)
	# number separators: "@-@", "@,@" and "@.@" escape -, "," and "." inside tokens
	string = string.replace(" @-@ ", "-")
	string = string.replace(" @,@ ", ",")
	string = string.replace(" @.@ ", ".")
	# punctuation
	string = string.replace(" : ", ": ")
	string = string.replace(" ; ", "; ")
	string = string.replace(" . ", ". ")
	string = string.replace(" ! ", "! ")
	string = string.replace(" ? ", "? ")
	string = string.replace(" , ", ", ")
	# strip the padding inside brackets and quotes
	string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
	string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
	string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
	string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
	string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
	# miscellaneous: section headings, degree sign, line breaks, number mask
	string = string.replace("= = = =", "====")
	string = string.replace("= = =", "===")
	string = string.replace("= =", "==")
	string = string.replace(" " + chr(176) + " ", chr(176))  # chr(176) is the degree sign
	string = string.replace(" \n", "\n")
	string = string.replace("\n ", "\n")
	string = string.replace(" N ", " 1 ")
	string = string.replace(" 's", "'s")

	return string
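
# Usage sketch (illustrative inputs, not taken from any dataset file):
#   wikitext_detokenizer("The route runs 4 @.@ 5 km , ending near the park . \n")
#   -> "The route runs 4.5 km, ending near the park.\n"
#   wikitext_detokenizer("= = Gameplay = = \n")
#   -> "== Gameplay ==\n"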

def lambada_detokenizer(string):
	"""LAMBADA text is already plain prose; return it unchanged."""
	return string

def get_detokenizer(path):
	"""Return the detokenizer whose dataset name appears in `path`, or None."""
	for key in DETOKENIZERS:
		if key in path:
			return DETOKENIZERS[key]
	return None

# Maps a dataset-name substring (matched against the data path) to its detokenizer.
DETOKENIZERS = {
	'ptb': ptb_detokenizer,
	'wikitext': wikitext_detokenizer,
	'lambada': lambada_detokenizer,
}
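

# Minimal usage sketch. The data path below is hypothetical; get_detokenizer()
# only checks whether a known dataset name appears as a substring of the path.
if __name__ == "__main__":
	detok = get_detokenizer("data/wikitext-103/wiki.test.tokens")
	sample = "The route runs 4 @.@ 5 km , ending near the park . \n"
	if detok is not None:
		print(repr(detok(sample)))  # 'The route runs 4.5 km, ending near the park.\n'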