translation.py 7.66 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
"""
NOTE: This file implements translation tasks using datasets from WMT conferences,
provided by sacrebleu. Traditionally they are evaluated with BLEU scores. TER
and CHRF are other options.

We defer citations and descriptions of the many translation tasks used
here to the SacreBLEU repo from which we've obtained the datasets:
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py

Homepage: https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
"""
12
import pycountry
13
from pprint import pprint
14
15
16
from sacrebleu import sacrebleu
from lm_eval import metrics
from lm_eval.base import Task, rf
Muennighoff's avatar
Muennighoff committed
17
18
from typing import List

bzantium's avatar
bzantium committed
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# Optional word segmenters for target languages written without spaces.
# Missing packages are tolerated here; zh_split/ja_split raise a helpful
# ImportError only when segmentation is actually needed.
try:
    import nagisa

    HAS_NAGISA = True
except ImportError:
    HAS_NAGISA = False

try:
    import jieba

    HAS_JIEBA = True
except ImportError:
    HAS_JIEBA = False

Muennighoff's avatar
Muennighoff committed
33

34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# BibTeX citation for sacrebleu (Post, 2018), the source of all datasets here.
_CITATION = """
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""


49
50
51
# Re-export sacrebleu's dataset registry so callers can enumerate benchmarks.
sacrebleu_datasets = sacrebleu.DATASETS


&'s avatar
& committed
52
def create_tasks_from_benchmarks(benchmark_dict):
    """Build one translation Task class per (dataset, language pair) entry.

    :param benchmark_dict: { dataset: [lang_pair, ...], }
    :return: {task_name: task}
        e.g. {wmt14-fr-en: Task, wmt16-de-en: Task}
    """

    def _task_version(dataset, language_pair):
        # Tasks targeting zh/ja were bumped to version 1 when jieba/nagisa
        # word segmentation was introduced for scoring.
        if language_pair[-2:] in ("zh", "ja"):
            return 1
        return 0

    tasks = {}
    for dataset, language_pairs in benchmark_dict.items():
        for pair in language_pairs:
            tasks[f"{dataset}-{pair}"] = create_translation_task(
                dataset, pair, _task_version(dataset, pair)
            )
    return tasks

bzantium's avatar
bzantium committed
72

Muennighoff's avatar
Muennighoff committed
73
74
75
76
########################################
# Language Specifics
########################################

bzantium's avatar
bzantium committed
77

Muennighoff's avatar
Muennighoff committed
78
79
def zh_split(zh_text: List[str]) -> List[str]:
    """Segment each Chinese string into space-joined tokens using jieba."""
    if not HAS_JIEBA:
        raise ImportError(
            "Chinese text splitting requires the `jieba` package. "
            "Please install it with:\npip install jieba"
        )

    segmented = []
    for txt in zh_text:
        segmented.append(" ".join(jieba.cut(txt.strip())))
    return segmented

bzantium's avatar
bzantium committed
88

Muennighoff's avatar
Muennighoff committed
89
90
def ja_split(ja_text: List[str]) -> List[str]:
    """Segment each Japanese string into space-joined tokens using nagisa."""
    if not HAS_NAGISA:
        raise ImportError(
            "Japanese text splitting requires the `nagisa` package. "
            "Please install it with:\npip install nagisa"
        )

    segmented = []
    for txt in ja_text:
        segmented.append(" ".join(nagisa.tagging(txt.strip()).words))
    return segmented

bzantium's avatar
bzantium committed
99

Muennighoff's avatar
Muennighoff committed
100
101
# Target-language codes written without spaces, mapped to their segmenters.
NO_SPACE_LANG = {"zh": zh_split, "ja": ja_split}

&'s avatar
& committed
102
103
104
105
########################################
# Tasks
########################################

bzantium's avatar
bzantium committed
106

Leo Gao's avatar
Leo Gao committed
107
def create_translation_task(dataset, language_pair, version=0):
    """Return a GeneralTranslationTask subclass pinned to one dataset/pair."""

    class TranslationTask(GeneralTranslationTask):
        # Version 1 indicates zh/ja targets scored with jieba/nagisa splitting.
        VERSION = version

        def __init__(self):
            # The closure supplies dataset/pair, so the task takes no args.
            GeneralTranslationTask.__init__(self, dataset, language_pair)

    return TranslationTask

bzantium's avatar
bzantium committed
116

117
class GeneralTranslationTask(Task):
    """Generative machine-translation task over a sacrebleu test set.

    Prompts look like "French phrase: <src>\nEnglish phrase:" and the model's
    greedy generation (up to a newline) is scored with corpus-level
    BLEU/CHRF/TER in ``aggregation``.
    """

    VERSION = 0

    # e.g. ("wmt14", "fr-en")
    def __init__(self, sacrebleu_dataset, sacrebleu_language_pair=None):
        self.sacrebleu_dataset = sacrebleu_dataset
        self.sacrebleu_language_pair = sacrebleu_language_pair
        # Populated by download(): file paths, then the loaded sentence lists.
        self.src_file = self.ref_file = self.src_data = self.ref_data = None

        super().__init__()

    def download(self, data_dir=None, cache_dir=None, download_mode=None):
        """Fetch the test set via sacrebleu and load source/reference lines."""
        # This caches in the users home dir automatically
        self.src_file, self.ref_file = sacrebleu.download_test_set(
            self.sacrebleu_dataset, self.sacrebleu_language_pair
        )
        # Load both files into memory, stripping trailing whitespace/newlines.
        self.src_data, self.ref_data = [
            [line.rstrip() for line in sacrebleu.smart_open(file)]
            for file in (self.src_file, self.ref_file)
        ]

    def has_training_docs(self):
        """Whether the task has a training set"""
        # TODO In the future we could be more discerning. Some more recent tests have train and dev sets
        return False

    def has_validation_docs(self):
        """Whether the task has a validation set"""
        return False

    def has_test_docs(self):
        """Whether the task has a test set"""
        return True

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object, that doc_to_text can handle
        """
        # Pair source and reference sentences line-by-line into docs.
        return [
            {"src": src, "ref": ref} for src, ref in zip(self.src_data, self.ref_data)
        ]

    def doc_to_text(self, doc):
        """Format the source sentence as a translation prompt."""
        language_codes = self.sacrebleu_language_pair.split("-")
        src_lang = code_to_language(language_codes[0])
        tar_lang = code_to_language(language_codes[1])
        return f"{src_lang} phrase: " + doc["src"] + f"\n{tar_lang} phrase:"

    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        # Decontaminate against the source-side sentence.
        return doc["src"]

    def doc_to_target(self, doc):
        # This shows a single target, though there may be multiple targets in a lang test
        # NOTE(review): operator precedence makes this parse as
        # (" " + doc["ref"]) if isinstance(doc["ref"], str) else doc["ref"][0],
        # so a list of references yields its first element *without* the
        # leading space — confirm that asymmetry is intended.
        return " " + doc["ref"] if isinstance(doc["ref"], str) else doc["ref"][0]

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        # Greedy generation, stopping at the first newline.
        return rf.greedy_until(ctx, {"until": ["\n"]})

    def process_results(self, doc, results):
        # Add spaces between words for BLEU score calculation of target languages like Chinese
        # NOTE(review): this mutates doc["ref"] in place so the segmented
        # reference is what gets paired with the (also segmented) prediction.
        tar_lang_code = self.sacrebleu_language_pair.split("-")[-1]
        if tar_lang_code in NO_SPACE_LANG:
            doc["ref"] = NO_SPACE_LANG[tar_lang_code]([doc["ref"]])[0]
            results = NO_SPACE_LANG[tar_lang_code](results)

        # These metrics are corpus-level not sentence level, so we'll hide the
        # results in this dict and compute the corpus score in the aggregate method
        ref_pred = (doc["ref"], results)
        return {
            "bleu": ref_pred,
            "chrf": ref_pred,
            "ter": ref_pred,
        }

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        return {
            "bleu": metrics.bleu,
            "chrf": metrics.chrf,
            "ter": metrics.ter,
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        return {
            "bleu": True,
            "chrf": True,
            "ter": False,  # TER is an error rate: lower is better
        }

    def __str__(self):
        """Human-readable task name, e.g. "WMT14 French to English Task"."""
        language_codes = self.sacrebleu_language_pair.split("-")
        src_lang = code_to_language(language_codes[0])
        tar_lang = code_to_language(language_codes[1])
        return f"{self.sacrebleu_dataset.upper()} {src_lang} to {tar_lang} Task"

235
236
237
238
239
240
241
242

########################################
# Util
########################################


def code_to_language(code):
    """Resolve an ISO language code (e.g. "fr", "deu") to its English name."""
    # key is alpha_2 or alpha_3 depending on the code length
    lookup_key = f"alpha_{len(code)}"
    match = pycountry.languages.get(**{lookup_key: code})
    return match.name