# SuperGLUE_BoolQ_ppl_314797.py
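# Zero-shot, perplexity-based evaluation config for SuperGLUE BoolQ.
# Each example is rendered under two candidate dialogues (answering "No" vs.
# answering "Yes"); the PPL inferencer keeps the candidate the model assigns
# the lower perplexity to, and accuracy is computed against the gold label.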
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BoolQDataset_V3

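# Reader settings: feed the 'question' and 'passage' columns into the prompt
# templates and treat the 'label' column as the reference answer.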
BoolQ_reader_cfg = dict(
    input_columns=['question', 'passage'],
    output_column='label',
    test_split='train')

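# Inference settings: the template keys ('false' / 'true') must match the
# possible values of the 'label' column, since the key of the lowest-perplexity
# template becomes the prediction. ZeroRetriever retrieves no in-context
# examples, i.e. the evaluation is purely zero-shot.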
BoolQ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            'false':
            dict(round=[
                dict(role='HUMAN', prompt='Passage: {passage}\nQuestion: {question}?'),
                dict(role='BOT', prompt='Answer: No'),
            ]),
            'true':
            dict(round=[
                dict(role='HUMAN', prompt='Passage: {passage}\nQuestion: {question}?'),
                dict(role='BOT', prompt='Answer: Yes'),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer),
)

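# Evaluation settings: plain accuracy of the predicted keys against gold labels.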
BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

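# Dataset entry: BoolQDataset_V3 loads the local SuperGLUE BoolQ jsonl, and the
# reader, inference, and evaluation configs above are wired together under the
# 'BoolQ' abbreviation.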
BoolQ_datasets = [
    dict(
        abbr='BoolQ',
        type=BoolQDataset_V3,
        path='./data/SuperGLUE/BoolQ/val.jsonl',
        reader_cfg=BoolQ_reader_cfg,
        infer_cfg=BoolQ_infer_cfg,
        eval_cfg=BoolQ_eval_cfg,
    )
]
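
# A minimal sketch of how a run config would typically pull in this dataset
# list (the relative import path is illustrative and depends on where this
# file lives under the configs/ tree):
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from .SuperGLUE_BoolQ_ppl_314797 import BoolQ_datasets
#
#     datasets = [*BoolQ_datasets]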