"""
Semantic Parsing on Freebase from Question-Answer Pairs
https://cs.stanford.edu/~pliang/papers/freebase-emnlp2013.pdf

WebQuestions is a question-answering benchmark consisting of 6,642 question/answer
pairs. The questions are meant to be answerable using Freebase, a large knowledge
graph, and mostly center on a single named entity. They are popular questions
that were asked on the web (at least as of 2013).

Homepage: https://worksheets.codalab.org/worksheets/0xba659fe363cb46e7a505c5b6a774dc8a
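
A typical record, as exposed by the HF "web_questions" dataset (the exact
values below are illustrative):

    {
        "question": "what is the name of justin bieber brother?",
        "answers": ["Jazmyn Bieber", "Jaxon Bieber"]
    }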
"""
from .common import HFTask
from lm_eval.base import rf
from ..metrics import mean


_CITATION = """
@inproceedings{berant-etal-2013-semantic,
    title = "Semantic Parsing on {F}reebase from Question-Answer Pairs",
    author = "Berant, Jonathan  and
      Chou, Andrew  and
      Frostig, Roy  and
      Liang, Percy",
    booktitle = "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
    month = oct,
    year = "2013",
    address = "Seattle, Washington, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D13-1160",
    pages = "1533--1544",
}
"""


class WebQs(HFTask):
    VERSION = 0
    DATASET_PATH = "web_questions"
    DATASET_NAME = None

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
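        # the HF "web_questions" dataset ships only train and test splits, so
        # there are no validation docs to return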
        return False

    def has_test_docs(self):
        return True

    def doc_to_text(self, doc):
        return "Question: " + doc['question'] + '\nAnswer:'

    def doc_to_target(self, doc):
        # This picks one answer to be the "correct" one, even though there can
        # be multiple correct answers.
        # TODO: make sure we're actually handling multi-answer correctly
        return " " + doc['answers'][0]

    def _remove_prefixes(self, aliases):
        # Optimization: remove any alias that has a strict prefix elsewhere in
        # the list. This is safe for the any-greedy-match scoring in
        # process_results: if a longer alias would be matched greedily, its
        # prefix would be too, so keeping only the prefix cannot change the
        # outcome.
        aliases = sorted(aliases)  # sort a copy so the caller's list isn't mutated
        ret = [aliases[0]]
        for alias in aliases[1:]:
            if not alias.startswith(ret[-1]):
                ret.append(alias)

        return ret
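    # A quick illustration with hypothetical aliases (not from the dataset):
    # ["New York", "New York City", "NYC"] sorts to
    # ["NYC", "New York", "New York City"] and reduces to ["NYC", "New York"];
    # "New York City" is dropped as a strict extension of "New York".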

    def construct_requests(self, doc, ctx):
        ret = []
        for alias in self._remove_prefixes(doc['answers']):
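            # rf.loglikelihood returns (loglikelihood, is_greedy); we only need
            # the boolean telling us whether the alias would be the model's
            # greedy completion of the context.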
            _, is_prediction = rf.loglikelihood(ctx, " " + alias)
            ret.append(is_prediction)
        return ret

    def process_results(self, doc, results):
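        # `results` holds the is_greedy flags from construct_requests, one per
        # deduplicated alias; the document counts as correct iff the model
        # greedily produced at least one alias.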
        return {
            "acc": float(any(results))
        }

    def aggregation(self):
        return {
            "acc": mean,
        }

    def higher_is_better(self):
        return {
            "acc": True
        }
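

# A minimal smoke-test sketch, not part of the harness itself. It avoids
# instantiating WebQs (which would download the dataset through HFTask) by
# borrowing the stateless helpers straight off the class; run it in package
# context (python -m ...), since the relative imports above require it.
if __name__ == "__main__":
    doc = {
        "question": "what is the name of justin bieber brother?",
        "answers": ["Jazmyn Bieber", "Jaxon Bieber"],
    }
    print(WebQs.doc_to_text(None, doc))          # Question: ...\nAnswer:
    print(repr(WebQs.doc_to_target(None, doc)))  # ' Jazmyn Bieber'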