pubmedqa.py 2.93 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
"""
PubMedQA: A Dataset for Biomedical Research Question Answering
https://arxiv.org/pdf/1909.06146.pdf

PubMedQA is a novel biomedical question answering (QA) dataset collected from
PubMed abstracts. The task of PubMedQA is to answer research questions with 
yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after 
coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA 
has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA 
instances. Each PubMedQA instance is composed of (1) a question which is either
an existing research article title or derived from one, (2) a context which is
the corresponding abstract without its conclusion, (3) a long answer, which is
the conclusion of the abstract and, presumably, answers the research question, 
and (4) a yes/no/maybe answer which summarizes the conclusion.

Homepage: https://pubmedqa.github.io/

@inproceedings{jin2019pubmedqa,
  title={PubMedQA: A Dataset for Biomedical Research Question Answering},
  author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua},
  booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
  pages={2567--2577},
  year={2019}
}
"""
jeffhsu3's avatar
jeffhsu3 committed
26
import numpy as np
27
from .common import HFTask
&'s avatar
& committed
28
29
from lm_eval.base import rf
from ..metrics import mean
jeffhsu3's avatar
jeffhsu3 committed
30
31
32


class Pubmed_QA(HFTask):
    """PubMedQA: yes/no/maybe biomedical question answering over PubMed
    abstracts (Jin et al., 2019).

    Each doc provides a question, an abstract (sans conclusion) as context,
    and a gold ``final_decision`` in {"yes", "no", "maybe"}. The model is
    scored by comparing the argmax over the loglikelihoods of " yes",
    " no", " maybe" continuations against the gold label (accuracy).
    """
    VERSION = 0
    DATASET_PATH = "pubmed_qa"
    DATASET_NAME = "pqa_labeled"

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def test_docs(self):
        if self.has_test_docs():
            # The HF "train" split of pqa_labeled is the 1k expert-annotated
            # set; it is used here purely for evaluation.
            return self.data["train"]

    def doc_to_text(self, doc):
        """Format the prompt: abstract paragraphs, question, answer cue."""
        ctxs = "\n".join(doc["context"]["contexts"])
        # Only the abstract and question enter the prompt; the gold answer
        # is produced by doc_to_target. (Previously doc["final_decision"]
        # was passed as an extra, unused format argument.)
        return "Abstract: {}\nQuestion: {}\nAnswer:".format(
            ctxs,
            doc["question"],
        )

    def doc_to_target(self, doc):
        # Leading space matches the tokenization of the continuation.
        return " {}".format(doc["final_decision"])

    def construct_requests(self, doc, ctx):
        """ Uses RequestFactory to construct Requests and returns
        an iterable of Requests which will be sent to the LM.
        """
        ll_yes, _ = rf.loglikelihood(ctx, " yes")
        ll_no, _ = rf.loglikelihood(ctx, " no")
        ll_maybe, _ = rf.loglikelihood(ctx, " maybe")
        return ll_yes, ll_no, ll_maybe

    def process_results(self, doc, results):
        """Score one doc: accuracy of the argmax prediction vs. gold.

        ``results`` is ordered (ll_yes, ll_no, ll_maybe), matching
        construct_requests.
        """
        gold = doc["final_decision"]
        pred = np.argmax(results)
        return {
            "acc": ["yes", "no", "maybe"][pred] == gold,
        }

    def aggregation(self):
        return {
            "acc": mean,
        }

    def higher_is_better(self):
        return {
            "acc": True,
        }