# anli_gen_fc7328.py
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AnliDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess

# Build one dataset config per ANLI round (R1-R3)
anli_datasets = []
for _split in ['R1', 'R2', 'R3']:
    anli_reader_cfg = dict(
        input_columns=['context', 'hypothesis'],
        output_column='label',
    )

    # Zero-shot generation: ZeroRetriever adds no in-context examples; the prompt
    # frames NLI as a three-way multiple-choice question
    anli_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{context}\n{hypothesis}\nQuestion: What is the relation between the two sentences?\nA. Contradiction\nB. Entailment\nC. Neutral\nAnswer: '),
                    dict(role='BOT', prompt='{label}'),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )
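
    # Illustrative rendering of the HUMAN turn for one item (placeholders are
    # filled from the reader's input_columns):
    #   <context>
    #   <hypothesis>
    #   Question: What is the relation between the two sentences?
    #   A. Contradiction
    #   B. Entailment
    #   C. Neutral
    #   Answer: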

    # Accuracy over the first capital letter (expected A/B/C) extracted from the BOT reply
    anli_eval_cfg = dict(evaluator=dict(type=AccEvaluator),
                         pred_role='BOT',
                         pred_postprocessor=dict(type=first_capital_postprocess))

    anli_datasets.append(
        dict(
            type=AnliDataset,
            abbr=f'anli-{_split}',
            path=f'data/anli/anli_v1.0/{_split}/dev.jsonl',
            reader_cfg=anli_reader_cfg,
            infer_cfg=anli_infer_cfg,
            eval_cfg=anli_eval_cfg,
        )
    )
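
# Usage sketch (assumption, not part of this config): a top-level OpenCompass
# run config typically pulls these dataset dicts in via mmengine's read_base;
# the exact relative import path depends on where this file sits in the
# configs tree, e.g.:
#
#   from mmengine.config import read_base
#
#   with read_base():
#       from .datasets.anli.anli_gen_fc7328 import anli_datasets
#
#   datasets = [*anli_datasets]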