"official/vision/beta/losses/loss_utils.py" did not exist on "27347c2d9241161afd110a004e51a4242f094da3"
__main__.py 16 KB
Newer Older
1
2
3
import argparse
import json
import logging
import os
import sys
from functools import partial
from typing import Union

from lm_eval import evaluator, utils
from lm_eval.evaluator import request_caching_arg_to_dict
from lm_eval.loggers import EvaluationTracker, WandbLogger
from lm_eval.tasks import TaskManager
from lm_eval.utils import handle_non_serializable, make_table, simple_parse_args_string


def _int_or_none_list_arg_type(
    min_len: int, max_len: int, defaults: str, value: str, split_char: str = ","
):
    def parse_value(item):
        item = item.strip().lower()
        if item == "none":
            return None
        try:
            return int(item)
        except ValueError:
            raise argparse.ArgumentTypeError(f"{item} is not an integer or None")

    items = [parse_value(v) for v in value.split(split_char)]
    num_items = len(items)

    if num_items == 1:
        # Makes downstream handling the same for single and multiple values
        items = items * max_len
    elif num_items < min_len or num_items > max_len:
        raise argparse.ArgumentTypeError(
            f"Argument requires {min_len} to {max_len} integers or None, separated by '{split_char}'"
        )
    elif num_items != max_len:
        logging.warning(
            f"Argument requires {min_len} to {max_len} integers or None, separated by '{split_char}'. "
            "Missing values will be filled with defaults."
        )
        default_items = [parse_value(v) for v in defaults.split(split_char)]
        items.extend(
            default_items[num_items:]
        )  # extend items list with missing defaults

    return items
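
# Illustrative behavior of _int_or_none_list_arg_type (a hedged sketch; the
# arguments mirror how this parser is bound for `--seed` below, via
# partial(_int_or_none_list_arg_type, 3, 4, "0,1234,1234,1234")):
#   _int_or_none_list_arg_type(3, 4, "0,1234,1234,1234", "42")
#       -> [42, 42, 42, 42]            # a single value is broadcast to max_len
#   _int_or_none_list_arg_type(3, 4, "0,1234,1234,1234", "0,none,8")
#       -> [0, None, 8, 1234]          # missing 4th entry filled from defaults
#   _int_or_none_list_arg_type(3, 4, "0,1234,1234,1234", "1,2")
#       -> argparse.ArgumentTypeError  # fewer than min_len values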


def check_argument_types(parser: argparse.ArgumentParser):
    """
    Check to make sure all CLI args are typed, raises error if not
    """
    for action in parser._actions:
        if action.dest != "help" and not action.const:
            if action.type is None:
                raise ValueError(
                    f"Argument '{action.dest}' doesn't have a type specified."
                )
            else:
                continue
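
# Hedged illustration with a toy parser (not the CLI defined below): an untyped
# option trips the guard, while typed options and const-style actions pass.
#   p = argparse.ArgumentParser()
#   p.add_argument("--n", type=int)                # OK: type is specified
#   p.add_argument("--flag", action="store_true")  # OK: const-style action
#   p.add_argument("--name")                       # no type= -> ValueError
#   check_argument_types(p)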


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
    )
    parser.add_argument(
        "--tasks",
        "-t",
        default=None,
        type=str,
        metavar="task1,task2",
        help="To get full list of tasks, use the command lm-eval --tasks list",
    )
    parser.add_argument(
        "--model_args",
        "-a",
        default="",
        type=str,
        help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
    )
    parser.add_argument(
        "--num_fewshot",
        "-f",
        type=int,
        default=None,
        metavar="N",
        help="Number of examples in few-shot context",
    )
    parser.add_argument(
        "--batch_size",
        "-b",
        type=str,
        default=1,
        metavar="auto|auto:N|N",
        help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
    )
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=None,
        metavar="N",
        help="Maximal batch size to try with --batch_size auto.",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=None,
        help="Device to use (e.g. cuda, cuda:0, cpu).",
    )
    parser.add_argument(
        "--output_path",
        "-o",
        default=None,
        type=str,
        metavar="DIR|DIR/file.json",
        help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
    )
    parser.add_argument(
        "--limit",
        "-L",
        type=float,
        default=None,
        metavar="N|0<N<1",
        help="Limit the number of examples per task. "
        "If <1, limit is a percentage of the total number of examples.",
    )
    parser.add_argument(
        "--use_cache",
        "-c",
        type=str,
        default=None,
        metavar="DIR",
        help="A path to a sqlite db file for caching model responses. `None` if not caching.",
    )
    parser.add_argument(
        "--cache_requests",
        type=str,
        default=None,
        choices=["true", "refresh", "delete"],
        help="Speed up evaluation by caching the building of dataset requests. `None` if not caching.",
    )
    parser.add_argument(
        "--check_integrity",
        action="store_true",
        help="Whether to run the relevant part of the test suite for the tasks.",
    )
    parser.add_argument(
        "--write_out",
        "-w",
        action="store_true",
        default=False,
        help="Prints the prompt for the first few documents.",
    )
    parser.add_argument(
        "--log_samples",
        "-s",
        action="store_true",
        default=False,
        help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis. Use with --output_path.",
    )
    parser.add_argument(
        "--system_instruction",
        type=str,
        default=None,
        help="System instruction to be used in the prompt",
    )
    parser.add_argument(
        "--apply_chat_template",
        action="store_true",
        default=False,
        help="If True, applies the chat template to the prompt",
    )
    parser.add_argument(
        "--fewshot_as_multiturn",
        action="store_true",
        default=False,
        help="If True, uses the fewshot as a multi-turn conversation",
    )
    parser.add_argument(
        "--show_config",
        action="store_true",
        default=False,
        help="If True, shows the the full config of all tasks at the end of the evaluation.",
    )
    parser.add_argument(
        "--include_path",
        type=str,
        default=None,
        metavar="DIR",
        help="Additional path from which to include external task configurations.",
    )
    parser.add_argument(
        "--gen_kwargs",
        type=str,
        default=None,
        help=(
            "String arguments for model generation on greedy_until tasks,"
            " e.g. `temperature=0,top_k=0,top_p=0`."
        ),
    )
    parser.add_argument(
        "--verbosity",
        "-v",
        type=str.upper,
        default="INFO",
        metavar="CRITICAL|ERROR|WARNING|INFO|DEBUG",
        help="Controls the reported logging error level. Set to DEBUG when testing + adding new task configurations for comprehensive log output.",
    )
    parser.add_argument(
        "--wandb_args",
        type=str,
        default="",
        help="Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval`",
    )
    parser.add_argument(
        "--hf_hub_log_args",
        type=str,
        default="",
        help="Comma separated string arguments passed to Hugging Face Hub's log function, e.g. `hub_results_org=EleutherAI,hub_repo_name=lm-eval-results`",
    )
    parser.add_argument(
        "--predict_only",
        "-x",
        action="store_true",
        default=False,
        help="Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.",
    )
    default_seed_string = "0,1234,1234,1234"
    parser.add_argument(
        "--seed",
        type=partial(_int_or_none_list_arg_type, 3, 4, default_seed_string),
        default=default_seed_string,  # for backward compatibility
        help=(
            "Set seed for python's random, numpy, torch, and fewshot sampling.\n"
            "Accepts a comma-separated list of 4 values for python's random, numpy, torch, and fewshot sampling seeds, "
            "respectively, or a single integer to set the same seed for all four.\n"
            f"The values are either an integer or 'None' to not set the seed. Default is `{default_seed_string}` "
            "(for backward compatibility).\n"
            "E.g. `--seed 0,None,8,52` sets `random.seed(0)`, `torch.manual_seed(8)`, and fewshot sampling seed to 52. "
            "Here numpy's seed is not set since the second value is `None`.\n"
            "E.g. `--seed 42` sets all four seeds to 42."
        ),
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub",
    )
    return parser


def parse_eval_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
    check_argument_types(parser)
    return parser.parse_args()
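
# Programmatic usage sketch (hedged; parse_eval_args reads sys.argv, so for
# in-process use build the Namespace explicitly and hand it to cli_evaluate
# below, which accepts externally parsed args -- model/task values are
# illustrative):
#   parser = setup_parser()
#   args = parser.parse_args(["--model", "hf", "--tasks", "lambada_openai"])
#   cli_evaluate(args)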


def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
    if not args:
        # we allow for args to be passed externally, else we parse them ourselves
        parser = setup_parser()
        args = parse_eval_args(parser)

    if args.wandb_args:
        wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))

    eval_logger = utils.eval_logger
    eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
    eval_logger.info(f"Verbosity set to {args.verbosity}")
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    # update the evaluation tracker args with the output path and the HF token
    if args.output_path:
        args.hf_hub_log_args += f",output_path={args.output_path}"
    if os.environ.get("HF_TOKEN", None):
        args.hf_hub_log_args += f",token={os.environ.get('HF_TOKEN')}"
    evaluation_tracker_args = simple_parse_args_string(args.hf_hub_log_args)
    evaluation_tracker = EvaluationTracker(**evaluation_tracker_args)
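
    # Hedged illustration of the comma-separated-string convention used by this
    # CLI (keys/values are examples drawn from the --hf_hub_log_args help text):
    #   simple_parse_args_string("output_path=./results,hub_results_org=EleutherAI")
    #       -> {"output_path": "./results", "hub_results_org": "EleutherAI"}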

    if args.predict_only:
        args.log_samples = True
    if (args.log_samples or args.predict_only) and not args.output_path:
        raise ValueError(
            "Specify --output_path if providing --log_samples or --predict_only"
        )

    if args.fewshot_as_multiturn and args.apply_chat_template is False:
        raise ValueError(
            "If fewshot_as_multiturn is set, apply_chat_template must be set to True."
        )

    if (
        args.num_fewshot is None or args.num_fewshot == 0
    ) and args.fewshot_as_multiturn:
        raise ValueError(
            "If fewshot_as_multiturn is set, num_fewshot must be greater than 0."
        )
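
    # Hedged note: the two checks above mean --fewshot_as_multiturn is only
    # valid together with --apply_chat_template and a positive --num_fewshot,
    # e.g. (task name illustrative):
    #   lm-eval --model hf --tasks gsm8k --num_fewshot 4 \
    #       --apply_chat_template --fewshot_as_multiturn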

    if args.include_path is not None:
        eval_logger.info(f"Including path: {args.include_path}")
    task_manager = TaskManager(args.verbosity, include_path=args.include_path)

    if "push_samples_to_hub" in evaluation_tracker_args and not args.log_samples:
        eval_logger.warning(
            "Pushing samples to the Hub requires --log_samples to be set. Samples will not be pushed to the Hub."
        )

    if args.limit:
        eval_logger.warning(
            " --limit SHOULD ONLY BE USED FOR TESTING. "
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    if args.tasks is None:
        eval_logger.error("Need to specify task to evaluate.")
        sys.exit()
    elif args.tasks == "list":
        eval_logger.info(
            "Available Tasks:\n - {}".format("\n - ".join(task_manager.all_tasks))
        )
        sys.exit()
    else:
        if os.path.isdir(args.tasks):
            import glob

            task_names = []
            yaml_path = os.path.join(args.tasks, "*.yaml")
            for yaml_file in glob.glob(yaml_path):
                config = utils.load_yaml_config(yaml_file)
                task_names.append(config)
        else:
            task_list = args.tasks.split(",")
            task_names = task_manager.match_tasks(task_list)
            for task in [task for task in task_list if task not in task_names]:
                if os.path.isfile(task):
                    config = utils.load_yaml_config(task)
                    task_names.append(config)
            task_missing = [
                task for task in task_list if task not in task_names and "*" not in task
            ]  # we don't want errors if a wildcard ("*") task name was used

            if task_missing:
                missing = ", ".join(task_missing)
                eval_logger.error(
                    f"Tasks were not found: {missing}\n"
                    f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks",
                )
                raise ValueError(
                    f"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues."
                )
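
    # Note (hedged): at this point task_names may mix registered task-name
    # strings with task-config dicts loaded from YAML files; both forms are
    # handed to evaluator.simple_evaluate below.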

    # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args
    if args.trust_remote_code:
        eval_logger.info(
            "Passed `--trust_remote_code`, setting environment variable `HF_DATASETS_TRUST_REMOTE_CODE=true`"
        )
        # HACK: import datasets and override its HF_DATASETS_TRUST_REMOTE_CODE value internally,
        # because it's already been determined based on the prior env var before launching our
        # script--`datasets` gets imported by lm_eval internally before these lines can update the env.
        import datasets

        datasets.config.HF_DATASETS_TRUST_REMOTE_CODE = True

        args.model_args = args.model_args + ",trust_remote_code=True"

    eval_logger.info(f"Selected Tasks: {task_names}")

    request_caching_args = request_caching_arg_to_dict(
        cache_requests=args.cache_requests
    )

    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        max_batch_size=args.max_batch_size,
        device=args.device,
        use_cache=args.use_cache,
        limit=args.limit,
        check_integrity=args.check_integrity,
        write_out=args.write_out,
        log_samples=args.log_samples,
        evaluation_tracker=evaluation_tracker,
        system_instruction=args.system_instruction,
        apply_chat_template=args.apply_chat_template,
        fewshot_as_multiturn=args.fewshot_as_multiturn,
        gen_kwargs=args.gen_kwargs,
        task_manager=task_manager,
        verbosity=args.verbosity,
        predict_only=args.predict_only,
        random_seed=args.seed[0],
        numpy_random_seed=args.seed[1],
        torch_random_seed=args.seed[2],
        fewshot_random_seed=args.seed[3],
        **request_caching_args,
    )

    if results is not None:
        if args.log_samples:
            samples = results.pop("samples")
        dumped = json.dumps(
            results, indent=2, default=handle_non_serializable, ensure_ascii=False
        )
        if args.show_config:
            print(dumped)

        batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))
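        # e.g. [16, 8] -> "16,8" (illustrative values, recorded when batch
        # sizes are auto-detected); an empty list yields "" and suppresses the
        # parenthesized suffix in the summary line printed below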

        # Add W&B logging
        if args.wandb_args:
            try:
                wandb_logger.post_init(results)
                wandb_logger.log_eval_result()
                if args.log_samples:
                    wandb_logger.log_eval_samples(samples)
            except Exception as e:
                eval_logger.info(f"Logging to Weights and Biases failed due to {e}")

        evaluation_tracker.save_results_aggregated(
            results=results, samples=samples if args.log_samples else None
        )

        if args.log_samples:
            for task_name, config in results["configs"].items():
                evaluation_tracker.save_results_samples(
                    task_name=task_name, samples=samples[task_name]
                )

        if (
            evaluation_tracker.push_results_to_hub
            or evaluation_tracker.push_samples_to_hub
        ):
            evaluation_tracker.recreate_metadata_card()

        print(
            f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
            f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
        )
        print(make_table(results))
        if "groups" in results:
            print(make_table(results, "groups"))

        if args.wandb_args:
            # Tear down wandb run once all the logging is done.
            wandb_logger.run.finish()


if __name__ == "__main__":
    cli_evaluate()
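
# Typical invocation (hedged example; model and task names are illustrative):
#   python -m lm_eval --model hf \
#       --model_args pretrained=EleutherAI/pythia-160m \
#       --tasks lambada_openai --num_fewshot 0 --batch_size auto \
#       --output_path ./results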