from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import piqaDataset

# Reader configuration: the columns fed into the prompt template, the column
# holding the gold label, and the split used for evaluation.
piqa_reader_cfg = {
    'input_columns': ['goal', 'sol1', 'sol2'],
    'output_column': 'label',
    'test_split': 'validation',
}

# Perplexity-based inference config for PIQA: one chat template per candidate
# label (0 -> sol1, 1 -> sol2). The inferencer scores each filled-in template
# by PPL and picks the lowest-perplexity option as the prediction.
piqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: dict(round=[
                dict(role='HUMAN', prompt='{goal}'),
                dict(role='BOT', prompt='{sol1}'),
            ]),
            1: dict(round=[
                dict(role='HUMAN', prompt='{goal}'),
                dict(role='BOT', prompt='{sol2}'),
            ]),
        }),
    # Zero-shot: no in-context examples are retrieved.
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

# Evaluation: accuracy of the chosen (lowest-PPL) option against the gold label.
piqa_eval_cfg = {'evaluator': {'type': AccEvaluator}}

# Dataset entry consumed by the OpenCompass runner: ties the PIQA data on
# disk to the reader/inference/evaluation configs defined above.
piqa_datasets = [
    dict(
        abbr='piqa',
        type=piqaDataset,
        path='./data/piqa',
        reader_cfg=piqa_reader_cfg,
        infer_cfg=piqa_infer_cfg,
        eval_cfg=piqa_eval_cfg,
    )
]