# __main__.py
import os
import re
import sys
import json
import logging
import argparse
import numpy as np

from pathlib import Path
from typing import Union


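# 47 spaces: roughly the width of the log prefix configured below, so that
# continuation lines in multi-line log messages line up with the first line.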
SPACING = " " * 47

logging.basicConfig(
    format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d:%H:%M:%S",
    level=logging.INFO,
)


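# json.dumps fallback: numpy integers and sets are not JSON-serializable by
# default, so coerce them; anything else unrecognized is stringified.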
def _handle_non_serializable(o):
    if isinstance(o, (np.int64, np.int32)):
        return int(o)
    elif isinstance(o, set):
        return list(o)
    else:
        return str(o)

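# CLI surface: most of these flags are forwarded as-is to
# evaluator.simple_evaluate() in cli_evaluate() below.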
def parse_eval_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--model", default="hf", help="Name of model e.g. `hf`")
    parser.add_argument(
        "--tasks",
        default=None,
        help="To get full list of tasks, use the command lm-eval --tasks list"
    )
    parser.add_argument(
        "--model_args",
        default="",
        help="String arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
    )
    parser.add_argument(
        "--num_fewshot",
        type=int,
        default=None,
        help="Number of examples in few-shot context",
    )
    parser.add_argument("--batch_size", type=str, default=1)
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=None,
        help="Maximal batch size to try with --batch_size auto",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=None,
        help="Device to use (e.g. cuda, cuda:0, cpu)",
    )
    parser.add_argument(
        "--output_path",
        default=None,
        type=str,
        metavar="= [dir/file.jsonl] [DIR]",
        help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
    )
    parser.add_argument(
        "--limit",
        type=float,
        default=None,
        help="Limit the number of examples per task. "
        "If <1, limit is a percentage of the total number of examples.",
    )
    parser.add_argument(
        "--use_cache",
        type=str,
        default=None,
        help="A path to a sqlite db file for caching model responses. `None` if not caching.",
    )
    parser.add_argument("--decontamination_ngrams_path", default=None)  # TODO: not used
    parser.add_argument(
        "--check_integrity",
        action="store_true",
        help="Whether to run the relevant part of the test suite for the tasks",
    )
    parser.add_argument(
        "--write_out",
        action="store_true",
        default=False,
        help="Prints the prompt for the first few documents",
    )
    parser.add_argument(
        "--log_samples",
        action="store_true",
        default=False,
        help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis",
    )
    parser.add_argument(
        "--show_config",
        action="store_true",
        default=False,
        help="If True, shows the the full config of all tasks at the end of the evaluation.",
    )
    parser.add_argument(
        "--include_path",
        type=str,
        default=None,
        help="Additional path to include if there are external tasks to include.",
    )
    parser.add_argument(
        "--verbosity",
        type=str,
        default="INFO",
        help="Logging verbosity level, e.g. DEBUG, INFO, WARNING, or ERROR.",
    )
    return parser.parse_args()


def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
    if not args:
        # we allow for args to be passed externally, else we parse them ourselves
        args = parse_eval_args()

    eval_logger = logging.getLogger("lm-eval")
    eval_logger.setLevel(getattr(logging, args.verbosity))
    eval_logger.info(f"Verbosity set to {args.verbosity}")
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

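    # imported inside the function rather than at module top level, plausibly
    # so that `--help` and argument errors stay fast to surface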
    from lm_eval import evaluator, utils
    from lm_eval.tasks import include_path
    from lm_eval.api.registry import ALL_TASKS

    if args.limit:
        eval_logger.warning(
            " --limit SHOULD ONLY BE USED FOR TESTING."
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )
    if args.include_path is not None:
        eval_logger.info(f"Including path: {args.include_path}")
        include_path(args.include_path)

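    # resolve --tasks: default is every registered task; "list" prints the
    # registry and exits; a directory loads every *.yaml config inside it;
    # otherwise the value is a comma-separated mix of task names and YAML paths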
    if args.tasks is None:
        task_names = ALL_TASKS
    elif args.tasks == "list":
        eval_logger.info("Available Tasks:\n - {}".format(f"\n - ".join(sorted(ALL_TASKS))))
        sys.exit()
    else:
        if os.path.isdir(args.tasks):
            import glob

            task_names = []
            yaml_path = os.path.join(args.tasks, "*.yaml")
            for yaml_file in glob.glob(yaml_path):
                config = utils.load_yaml_config(yaml_file)
                task_names.append(config)
        else:
            tasks_list = args.tasks.split(",")
            task_names = utils.pattern_match(tasks_list, ALL_TASKS)
            for task in [task for task in tasks_list if task not in task_names]:
                if os.path.isfile(task):
                    config = utils.load_yaml_config(task)
                    task_names.append(config)
            task_missing = [task for task in tasks_list if task not in task_names]

            if task_missing:
                missing = ", ".join(task_missing)
                eval_logger.error(
                    f"Tasks were not found: {missing}\n"
                    f"{SPACING}Try `lm-eval -h` for a list of available tasks",
                )
                raise ValueError(
                    f"Tasks {missing} were not found. Try `lm-eval -h` for a list of available tasks."
                )

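    # normalize --output_path: a .json/.jsonl path names the results file
    # directly; anything else is treated as a directory holding results.json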
    if args.output_path:
        path = Path(args.output_path)
        # check if file or 'dir/results.json' exists
        if path.is_file() or Path(args.output_path).joinpath("results.json").is_file():
            eval_logger.warning(
                f"File already exists at {path}. Results will be overwritten."
            )
            output_path_file = path.joinpath("results.json")
            # an existing dir/results.json gets overwritten, but a bare file
            # path that already exists is rejected by this assert
            assert not path.is_file(), "File already exists"
        # if the path is a json/jsonl file, use its parent as the output dir
        elif path.suffix in (".json", ".jsonl"):
            output_path_file = path
            path.parent.mkdir(parents=True, exist_ok=True)
            path = path.parent
        else:
            path.mkdir(parents=True, exist_ok=True)
            output_path_file = path.joinpath("results.json")
    elif args.log_samples and not args.output_path:
        # --log_samples needs somewhere to write the per-sample outputs
        assert args.output_path, "Specify --output_path"

    eval_logger.info(f"Selected Tasks: {task_names}")

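    # run the evaluation; simple_evaluate can return None (e.g. on non-primary
    # ranks of a distributed run), hence the guard below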
    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        max_batch_size=args.max_batch_size,
        device=args.device,
        use_cache=args.use_cache,
        limit=args.limit,
        decontamination_ngrams_path=args.decontamination_ngrams_path,
        check_integrity=args.check_integrity,
        write_out=args.write_out,
        log_samples=args.log_samples,
    )

    if results is not None:
        if args.log_samples:
            samples = results.pop("samples")
        dumped = json.dumps(results, indent=2, default=_handle_non_serializable)
        if args.show_config:
            print(dumped)

        batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))

        if args.output_path:
            output_path_file.write_text(dumped)

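            # one JSONL file of per-sample logs per task, named after the
            # sanitized model_args string plus the task name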
            if args.log_samples:
                for task_name, config in results["configs"].items():
                    output_name = "{}_{}".format(
                        re.sub("/|=", "__", args.model_args), task_name
                    )
                    filename = path.joinpath(f"{output_name}.jsonl")
                    samples_dumped = json.dumps(
                        samples[task_name], indent=2, default=_handle_non_serializable
                    )
                    filename.write_text(samples_dumped)

        print(
            f"{args.model} ({args.model_args}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
            f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
        )
        print(evaluator.make_table(results))
        if "groups" in results:
            print(evaluator.make_table(results, "groups"))


if __name__ == "__main__":
    cli_evaluate()
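
# Example invocation (a sketch: the task and pretrained model below are
# illustrative, and assume the `lm_eval` package and an HF model are available):
#
#   python -m lm_eval \
#       --model hf \
#       --model_args pretrained=EleutherAI/pythia-160m,dtype=float32 \
#       --tasks lambada_openai \
#       --batch_size auto \
#       --output_path results/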