# main.py
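"""Command-line entry point for lm_eval: parse CLI flags, run
evaluator.simple_evaluate() over the selected tasks, and print (and
optionally save) the resulting metrics."""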
import argparse
import json
import logging

from lm_eval import tasks, evaluator

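# Quiet the OpenAI client's logger so only warnings and errors are shown.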
logging.getLogger("openai").setLevel(logging.WARNING)


def parse_args():
    parser = argparse.ArgumentParser()
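    # Most flags are forwarded to evaluator.simple_evaluate() below;
    # --output_path only controls where the JSON results are written, and
    # --provide_description is accepted but not implemented (see the assert in main()).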
    parser.add_argument("--model", required=True)
    parser.add_argument("--model_args", default="")
    parser.add_argument("--tasks", default="all_tasks")
    parser.add_argument("--provide_description", action="store_true")
    parser.add_argument("--num_fewshot", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--device", type=str, default=None)
    parser.add_argument("--output_path", default=None)
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--no_cache", action="store_true")
    parser.add_argument("--description_dict_path", default=None)
    parser.add_argument("--check_integrity", action="store_true")
    return parser.parse_args()


def main():
    args = parse_args()
    assert not args.provide_description  # not implemented

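    # A non-zero --limit evaluates only a subset of each task's documents,
    # which is useful for smoke tests but invalidates the reported metrics.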
    if args.limit:
        print(
            "WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

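    # "all_tasks" expands to every registered task; otherwise --tasks is a
    # comma-separated list of task names.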
    if args.tasks == "all_tasks":
        task_names = tasks.ALL_TASKS
    else:
        task_names = args.tasks.split(",")

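    # Optionally load per-task prompt descriptions from a JSON file
    # (keyed by task name).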
    description_dict = {}
    if args.description_dict_path:
        with open(args.description_dict_path, "r") as f:
            description_dict = json.load(f)

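    # Run the full evaluation; the returned dict of results and configuration
    # is JSON-serializable (it is dumped below).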
    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        device=args.device,
        no_cache=args.no_cache,
        limit=args.limit,
        description_dict=description_dict,
        check_integrity=args.check_integrity,
    )

    dumped = json.dumps(results, indent=2)
    print(dumped)

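    # Write the same JSON dump to disk if --output_path was given.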
    if args.output_path:
        with open(args.output_path, "w") as f:
            f.write(dumped)

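    # Echo the run configuration, then a human-readable summary table of the scores.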
    print(
        f"{args.model} ({args.model_args}), limit: {args.limit}, provide_description: {args.provide_description}, "
        f"num_fewshot: {args.num_fewshot}, batch_size: {args.batch_size}"
    )
    print(evaluator.make_table(results))


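# Example invocation (model and task names are illustrative):
#   python main.py --model gpt2 --tasks lambada,hellaswag --num_fewshot 0 --output_path results.json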
if __name__ == "__main__":
    main()