# race_ppl_5831a0.py
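# OpenCompass dataset config: perplexity-based (PPL) multiple-choice evaluation
# of the RACE reading-comprehension benchmark (middle- and high-school subsets).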
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import RaceDataset

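# Reader settings: which dataset columns are exposed to the prompt template and
# which column holds the gold answer. The validation split is declared as the
# train split, but with ZeroRetriever below no in-context examples are drawn
# from it; scoring runs on the test split.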
race_reader_cfg = dict(
    input_columns=['article', 'question', 'A', 'B', 'C', 'D'],
    output_column='answer',
    train_split='validation',
    test_split='test'
)

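# Inference settings. The dict comprehension builds one chat-style prompt per
# candidate option ('A'-'D'): the article, question, and options, followed by
# "Answer: <option>". PPLInferencer scores each filled-in prompt, and the option
# whose completion yields the lowest perplexity is taken as the prediction;
# ZeroRetriever keeps the evaluation zero-shot (no in-context examples).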
race_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            ans: dict(
                round=[
                    dict(role='HUMAN', prompt='Article:\n{article}\nQuestion:\n{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}'),
                    dict(role='BOT', prompt=f'Answer: {ans}'),
                ]
            )
            for ans in ['A', 'B', 'C', 'D']
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))

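# Evaluation settings: plain accuracy over the predicted options.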
race_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

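# Two dataset entries sharing the same reader/inference/evaluation configs:
# `name` selects the RACE subset ('middle' or 'high') and `path` points at a
# local copy of the data.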
race_datasets = [
    dict(
        abbr='race-middle',
        type=RaceDataset,
        path='./data/race',
        name='middle',
        reader_cfg=race_reader_cfg,
        infer_cfg=race_infer_cfg,
        eval_cfg=race_eval_cfg),
    dict(
        abbr='race-high',
        type=RaceDataset,
        path='./data/race',
        name='high',
        reader_cfg=race_reader_cfg,
        infer_cfg=race_infer_cfg,
        eval_cfg=race_eval_cfg)
]
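
# Usage sketch (hedged): a top-level OpenCompass config typically pulls these
# datasets in via read_base(); the exact module path below is an assumption
# based on the usual configs/datasets/ layout, not taken from this file.
#
#     from mmengine.config import read_base
#     with read_base():
#         from .datasets.race.race_ppl_5831a0 import race_datasets
#     datasets = [*race_datasets]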