from mmengine.config import read_base

# Dataset configs are plain python modules; read_base() makes their
# exported *_datasets lists importable via relative imports.
with read_base():
    from .datasets.winograd.winograd_ppl import winograd_datasets
    from .datasets.siqa.siqa_gen import siqa_datasets

# Evaluate on SIQA (generation) and Winograd (perplexity).
datasets = [*siqa_datasets, *winograd_datasets]
from opencompass.models import HuggingFaceCausalLM
# OPT-350M
opt350m = dict(
    type=HuggingFaceCausalLM,
    # the following are HuggingFaceCausalLM init parameters
    path='facebook/opt-350m',
    tokenizer_path='facebook/opt-350m',
    tokenizer_kwargs=dict(
        padding_side='left',
        truncation_side='left',
        proxies=None,
        trust_remote_code=True),
    model_kwargs=dict(device_map='auto'),
    max_seq_len=2048,
    # the following are not HuggingFaceCausalLM init parameters
    abbr='opt350m',              # Model abbreviation
    max_out_len=100,             # Maximum number of generated tokens
    batch_size=64,               # Inference batch size
    run_cfg=dict(num_gpus=1),    # Run configuration for specifying resource requirements
)

# OPT-125M
opt125m = dict(
    type=HuggingFaceCausalLM,
    # the following are HuggingFaceCausalLM init parameters
    path='facebook/opt-125m',
    tokenizer_path='facebook/opt-125m',
    tokenizer_kwargs=dict(
        padding_side='left',
        truncation_side='left',
        proxies=None,
        trust_remote_code=True),
    model_kwargs=dict(device_map='auto'),
    max_seq_len=2048,
    # the following are not HuggingFaceCausalLM init parameters
    abbr='opt125m',              # Model abbreviation
    max_out_len=100,             # Maximum number of generated tokens
    batch_size=128,              # Inference batch size
    run_cfg=dict(num_gpus=1),    # Run configuration for specifying resource requirements
)
# Models to be evaluated against every entry in `datasets`.
models = [opt350m, opt125m]