"docs/references/deploy_on_k8s.md" did not exist on "ddf39d3fcee2ed42284bfb90c3ea37b302090dad"
main.py 4.52 KB
Newer Older
lintangsutawika's avatar
lintangsutawika committed
1
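"""Command-line entry point for the lm-eval harness: parses arguments, resolves the
requested tasks, and runs evaluator.simple_evaluate."""
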
import os
import json
import fnmatch
import argparse
import logging

from lm_eval import evaluator, utils
from lm_eval.api.registry import ALL_TASKS
from lm_eval.logger import eval_logger

os.environ["TOKENIZERS_PARALLELISM"] = "false"


class MultiChoice:
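    """Argparse `choices` helper that accepts comma-separated task names with
    shell-style wildcard support."""
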
    def __init__(self, choices):
        self.choices = choices

    # Simple wildcard support (linux filename patterns)
    def __contains__(self, values):
        for value in values.split(","):
            if len(fnmatch.filter(self.choices, value)) == 0:
                eval_logger.warning("{} is not in task list.".format(value))
                eval_logger.info(f"Available tasks to choose:")
                for choice in self.choices:
                    eval_logger.info(f"  - {choice}")
        return True

    def __iter__(self):
        for choice in self.choices:
            yield choice


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True)
    parser.add_argument("--model_args", default="")
    parser.add_argument("--tasks", default=None, choices=MultiChoice(sorted(ALL_TASKS)))
    parser.add_argument("--config", default=None)
    parser.add_argument("--provide_description", action="store_true")
    parser.add_argument("--num_fewshot", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--device", type=str, default=None)
    parser.add_argument("--output_path", default=None)
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--no_cache", action="store_true")
    parser.add_argument("--decontamination_ngrams_path", default=None)
    parser.add_argument("--description_dict_path", default=None)
    parser.add_argument("--check_integrity", action="store_true")
    return parser.parse_args()


# Returns a list containing all values of the source_list that
# match at least one of the patterns
def pattern_match(patterns, source_list):
    task_names = set()
    for pattern in patterns:
        for matching in fnmatch.filter(source_list, pattern):
            task_names.add(matching)
    return sorted(list(task_names))

def setup_example_logger(output_path):
    """Sets up a logger that will save each example and prediction."""
    example_logger = logging.getLogger("examples")
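    # Write each logged example to examples.jsonl alongside the main output file.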
    if output_path:
        filename = f"./{os.path.dirname(output_path)}/examples.jsonl"
        formatter = logging.Formatter("%(message)s")
        handler = logging.FileHandler(filename)
        handler.setFormatter(formatter)
        example_logger.addHandler(handler)
    example_logger.setLevel(logging.INFO)


def main():
    args = parse_args()

    if args.limit:
        eval_logger.warning(
            " --limit SHOULD ONLY BE USED FOR TESTING."
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    if args.output_path:
        os.makedirs(os.path.dirname(args.output_path), exist_ok=True)

    setup_example_logger(args.output_path)

    # Resolve --tasks: it may be a directory of task YAML configs, a comma-separated
    # list of registered task names (with wildcard support), or paths to YAML files.
    # Default to every registered task when --tasks is not given.
    task_names = ALL_TASKS
    if args.tasks is not None:
        if os.path.isdir(args.tasks):
            import glob

            task_names = []
            yaml_path = os.path.join(args.tasks, "*.yaml")
            for yaml_file in glob.glob(yaml_path):
                config = utils.load_yaml_config(yaml_file)
                task_names.append(config)
        else:
            tasks_list = args.tasks.split(",")
            task_names = pattern_match(tasks_list, ALL_TASKS)
            for task in [task for task in tasks_list if task not in task_names]:
                if os.path.isfile(task):
                    config = utils.load_yaml_config(task)
                    task_names.append(config)

    eval_logger.info(f"Selected Tasks: {task_names}")

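    # Run the evaluation over the resolved task list and collect the results.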
    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        device=args.device,
        limit=args.limit,
        decontamination_ngrams_path=args.decontamination_ngrams_path,
        check_integrity=args.check_integrity,
    )
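
    # Print full JSON results, optionally write them to --output_path, and show the summary table.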
    if results is not None:
        dumped = json.dumps(results, indent=2)
        print(dumped)

        if args.output_path:
            with open(args.output_path, "w") as f:
                f.write(dumped)

        print(
            f"{args.model} ({args.model_args}), limit: {args.limit}, provide_description: {args.provide_description}, "
            f"num_fewshot: {args.num_fewshot}, batch_size: {args.batch_size}"
        )
        print(evaluator.make_table(results))


if __name__ == "__main__":
    main()