eval_TheoremQA.py 1.13 KB
Newer Older
jerrrrry's avatar
jerrrrry committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
# OpenCompass evaluation config: TheoremQA, 5-shot, on four HF models.
# `read_base()` makes the plain `from ... import` statements below pull in
# other OpenCompass config modules at config-parse time (mmengine mechanism).
from mmengine.config import read_base

with read_base():
    # 5-shot TheoremQA dataset definition (prompt/version hash 6f0af8).
    from opencompass.configs.datasets.TheoremQA.TheoremQA_5shot_gen_6f0af8 import \
        TheoremQA_datasets as datasets
    # NOTE: each model list is aliased with a `_model` suffix on purpose —
    # the locals()-scan later in this file collects every name ending in
    # `_model` into the top-level `models` list. Keep the suffix.
    from opencompass.configs.models.hf_internlm.hf_internlm2_20b import \
        models as hf_internlm2_20b_model
    from opencompass.configs.models.hf_internlm.hf_internlm2_math_20b import \
        models as hf_internlm2_math_20b_model
    from opencompass.configs.models.mistral.hf_mistral_7b_v0_1 import \
        models as hf_mistral_7b_v0_1_model
    from opencompass.configs.models.mistral.hf_mistral_7b_v0_2 import \
        models as hf_mistral_7b_v0_2_model

# Gather every imported model list (any module-level name ending in `_model`)
# into a single flat `models` list. The outermost iterable of a comprehension
# is evaluated in the enclosing (module) scope, so locals() here sees the
# names created by the read_base() imports above.
_model_lists = [v for k, v in locals().items() if k.endswith('_model')]
models = [entry for group in _model_lists for entry in group]

work_dir = 'outputs/TheoremQA-5shot'

# dataset    version    metric    mode      mistral-7b-v0.1-hf    mistral-7b-v0.2-hf    internlm2-20b-hf    internlm2-math-20b-hf
# ---------  ---------  --------  ------  --------------------  --------------------  ------------------  -----------------------
# TheoremQA  6f0af8     score     gen                    18.00                 16.75               25.87                    30.88