import os
import re
import json
import fnmatch
import jsonlines
import argparse
import logging

from lm_eval import evaluator, utils
from lm_eval.api.registry import ALL_TASKS
from lm_eval.logger import eval_logger

os.environ["TOKENIZERS_PARALLELISM"] = "false"
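
# Example invocation (a sketch for orientation; the model type, model_args, and
# task name below are illustrative placeholders, not values defined in this file):
#
#   python main.py \
#       --model hf \
#       --model_args pretrained=gpt2 \
#       --tasks lambada_openai \
#       --batch_size 8 \
#       --device cuda:0 \
#       --output_path results/gpt2.json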


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True)
    parser.add_argument("--model_args", default="")
    parser.add_argument("--tasks", default=None, choices=utils.MultiChoice(sorted(ALL_TASKS)))
    parser.add_argument("--config", default=None)
    parser.add_argument("--num_fewshot", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--max_batch_size", type=int, default=None,
                        help="Maximal batch size to try with --batch_size auto")
    parser.add_argument("--device", type=str, default=None)
    parser.add_argument("--output_path", default=None)
    parser.add_argument("--limit", type=float, default=None,
                        help="Limit the number of examples per task. "
                             "If <1, limit is a percentage of the total number of examples.")
    parser.add_argument("--data_sampling", type=float, default=None)
    parser.add_argument("--no_cache", action="store_true")
    parser.add_argument("--decontamination_ngrams_path", default=None)
    parser.add_argument("--description_dict_path", default=None)
    parser.add_argument("--check_integrity", action="store_true")
    parser.add_argument("--write_out", action="store_true", default=False)
    parser.add_argument("--output_base_path", type=str, default=None)
    return parser.parse_args()


def main():
    args = parse_args()

    if args.limit:
        eval_logger.warning(
            " --limit SHOULD ONLY BE USED FOR TESTING."
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    # Resolve --tasks: comma-separated names matched against the task registry,
    # a directory of YAML task configs, or paths to individual YAML files.
    if args.tasks is None:
        task_names = ALL_TASKS
    else:
        if os.path.isdir(args.tasks):
            import glob

            task_names = []
            yaml_path = os.path.join(args.tasks, "*.yaml")
            for yaml_file in glob.glob(yaml_path):
                config = utils.load_yaml_config(yaml_file)
                task_names.append(config)
        else:
            tasks_list = args.tasks.split(",")
            task_names = utils.pattern_match(tasks_list, ALL_TASKS)
            for task in [task for task in tasks_list if task not in task_names]:
                if os.path.isfile(task):
                    config = utils.load_yaml_config(task)
                    task_names.append(config)

    eval_logger.info(f"Selected Tasks: {task_names}")

    # TODO: description_dict?
    # description_dict = {}
    # if args.description_dict_path:
    #     with open(args.description_dict_path, "r") as f:
    #         description_dict = json.load(f)

    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        max_batch_size=args.max_batch_size,
        device=args.device,
        no_cache=args.no_cache,
        limit=args.limit,
        # description_dict=description_dict,
        decontamination_ngrams_path=args.decontamination_ngrams_path,
        check_integrity=args.check_integrity,
        write_out=args.write_out,
        output_base_path=args.output_base_path,
    )

    if results is not None:
        samples = results.pop("samples")

        dumped = json.dumps(results, indent=2)
        print(dumped)

        batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))

        if args.output_path:
            # Guard against a bare filename (empty dirname), which os.makedirs rejects.
            dirname = os.path.dirname(args.output_path)
            if dirname:
                os.makedirs(dirname, exist_ok=True)

            with open(args.output_path, "w") as f:
                f.write(dumped)

            for task_name, config in results["configs"].items():
                output_name = "{}_{}".format(
                    re.sub("/", "__", args.model_args), task_name
                )
                if os.path.isdir(args.output_path):
                    filename = f"./{args.output_path}/{output_name}.jsonl"
                elif os.path.isfile(args.output_path):
                    filename = (
                        f"./{os.path.dirname(args.output_path)}/{output_name}.jsonl"
                    )

                with jsonlines.open(filename, "w") as f:
                    f.write_all(samples[task_name])

        print(
            f"{args.model} ({args.model_args}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
            f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
        )
        print(evaluator.make_table(results))


if __name__ == "__main__":
    main()