import itertools
import logging
import random
from collections import defaultdict
from typing import TYPE_CHECKING, List, Optional, Union

import numpy as np
import torch

import lm_eval.api.metrics
import lm_eval.api.model
import lm_eval.api.registry
import lm_eval.models
from lm_eval.caching.cache import delete_cache
from lm_eval.evaluator_utils import (
    consolidate_results,
    get_sample_size,
    get_task_list,
    prepare_print_tasks,
    print_writeout,
    run_task_tests,
)
from lm_eval.logging_utils import add_env_info, get_git_commit_hash
from lm_eval.tasks import TaskManager, get_task_dict
from lm_eval.utils import eval_logger, positional_deprecated, simple_parse_args_string


if TYPE_CHECKING:
    from lm_eval.api.model import LM
    from lm_eval.tasks import Task


@positional_deprecated
def simple_evaluate(
    model,
    model_args: Optional[Union[str, dict]] = None,
    tasks: Optional[List[Union[str, dict, object]]] = None,
    num_fewshot: Optional[int] = None,
    batch_size: Optional[int] = None,
    max_batch_size: Optional[int] = None,
    device: Optional[str] = None,
    use_cache: Optional[str] = None,
    cache_requests: bool = False,
    rewrite_requests_cache: bool = False,
    delete_requests_cache: bool = False,
    limit: Optional[Union[int, float]] = None,
    bootstrap_iters: int = 100000,
    check_integrity: bool = False,
    write_out: bool = False,
    log_samples: bool = True,
    gen_kwargs: Optional[str] = None,
    task_manager: Optional[TaskManager] = None,
    verbosity: str = "INFO",
    predict_only: bool = False,
    random_seed: int = 0,
    numpy_random_seed: int = 1234,
    torch_random_seed: int = 1234,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param model: Union[str, LM]
        Name of model or LM object, see lm_eval.models.get_model
    :param model_args: Optional[str, dict]
        String or dict arguments for each model class, see LM.create_from_arg_string and LM.create_from_arg_object.
        Ignored if `model` argument is an LM object.
    :param tasks: list[Union[str, dict, Task]]
        List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in few-shot context
    :param batch_size: int or str, optional
        Batch size for model
    :param max_batch_size: int, optional
        Maximal batch size to try with automatic batch size detection
    :param device: str, optional
        PyTorch device (e.g. "cpu" or "cuda:0") for running models
    :param use_cache: str, optional
        A path to a sqlite db file for caching model responses. `None` if not caching.
    :param cache_requests: bool, optional
        Speed up evaluation by caching the building of dataset requests. Defaults to `False`.
    :param rewrite_requests_cache: bool, optional
        Rewrites all of the request cache if set to `True`. Defaults to `False`.
    :param delete_requests_cache: bool, optional
        Deletes all of the request cache if set to `True`. Defaults to `False`.
    :param limit: int or float, optional
        Limit the number of examples per task (only use this for testing). If <1, limit is a fraction of the total number of examples.
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param check_integrity: bool
        Whether to run the relevant part of the test suite for the tasks
    :param write_out: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :param gen_kwargs: str
        String arguments for model generation
        Ignored for all tasks with loglikelihood output_type.
    :param predict_only: bool
        If True, only model outputs will be generated and returned; metrics will not be evaluated.
    :param random_seed: int
        Random seed for python's random module. If set to None, the seed will not be set.
    :param numpy_random_seed: int
        Random seed for numpy. If set to None, the seed will not be set.
    :param torch_random_seed: int
        Random seed for torch. If set to None, the seed will not be set.

    :return
        Dictionary of results
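
    Example (a minimal sketch; the "hf" model backend and the "hellaswag" task
    are illustrative choices, assumed to be available in your installation):

        results = simple_evaluate(
            model="hf",
            model_args="pretrained=EleutherAI/pythia-160m",
            tasks=["hellaswag"],
            batch_size=8,
        )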
    """
    eval_logger.setLevel(getattr(logging, f"{verbosity}"))

    if delete_requests_cache:
        eval_logger.info("Deleting requests cache...")
        delete_cache()

    seed_message = []
    if random_seed is not None:
        # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1412
        seed_message.append(f"Setting random seed to {random_seed}")
        random.seed(random_seed)

    if numpy_random_seed is not None:
        seed_message.append(f"Setting numpy seed to {numpy_random_seed}")
        np.random.seed(numpy_random_seed)

    if torch_random_seed is not None:
        seed_message.append(f"Setting torch manual seed to {torch_random_seed}")
        torch.manual_seed(torch_random_seed)

    if seed_message:
        eval_logger.info(" | ".join(seed_message))

    if tasks is None:
        tasks = []
    if len(tasks) == 0:
        raise ValueError(
            "No tasks specified, or no tasks found. Please verify the task names."
        )

    if gen_kwargs is not None:
        gen_kwargs = simple_parse_args_string(gen_kwargs)
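        # e.g. an (illustrative) CLI string "temperature=0.7,top_p=0.95"
        # is parsed into {"temperature": 0.7, "top_p": 0.95}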
        eval_logger.warning(
            "generation_kwargs specified through cli, these settings will update set parameters in yaml tasks. "
            "Ensure 'do_sample=True' for non-greedy decoding!"
        )
        # an empty string parses to an empty dict; treat that as "no overrides"
        if not gen_kwargs:
            gen_kwargs = None

    if isinstance(model, str):
        if model_args is None:
            model_args = ""

        if isinstance(model_args, dict):
            lm = lm_eval.api.registry.get_model(model).create_from_arg_obj(
                model_args,
                {
                    "batch_size": batch_size,
                    "max_batch_size": max_batch_size,
                    "device": device,
                },
            )

        else:
            lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
                model_args,
                {
                    "batch_size": batch_size,
                    "max_batch_size": max_batch_size,
                    "device": device,
                },
            )
    else:
        if not isinstance(model, lm_eval.api.model.LM):
            raise TypeError(
                "model must be a string or an instance of lm_eval.api.model.LM"
            )
        lm = model

    if use_cache is not None:
        eval_logger.info(f"Using cache at {use_cache + '_rank' + str(lm.rank) + '.db'}")
        lm = lm_eval.api.model.CachingLM(
            lm,
            use_cache
            # each rank receives a different cache db.
            # necessary to avoid multiple writes to cache at once
            + "_rank"
            + str(lm.rank)
            + ".db",
        )

    if task_manager is None:
        task_manager = TaskManager(verbosity)

    eval_logger.info(
        "get_task_dict has been updated to accept an optional argument, `task_manager`"
        "Read more here:https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage"
    )
    task_dict = get_task_dict(tasks, task_manager)
    for task_name in task_dict.keys():
        task_obj = task_dict[task_name]
        if isinstance(task_obj, tuple):
            _, task_obj = task_obj
            if task_obj is None:
                continue

        if task_obj.get_config("output_type") == "generate_until":
            if gen_kwargs is not None:
                task_obj.set_config(
                    key="generation_kwargs", value=gen_kwargs, update=True
                )

        if predict_only:
            log_samples = True
            eval_logger.info(
                f"Processing {task_name} in output-only mode. Metrics will not be calculated!"
            )
            # we have to change the class properties post-hoc. This is pretty hacky.
            task_obj.override_metric(metric_name="bypass")

        if num_fewshot is not None:
            if (default_num_fewshot := task_obj.get_config("num_fewshot")) == 0:
                eval_logger.info(
                    f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored."
                )
            else:
                eval_logger.warning(
                    f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}"
                )
                task_obj.set_config(key="num_fewshot", value=num_fewshot)

    if check_integrity:
        run_task_tests(task_list=tasks)

    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
        cache_requests=cache_requests,
        rewrite_requests_cache=rewrite_requests_cache,
        bootstrap_iters=bootstrap_iters,
        write_out=write_out,
        log_samples=log_samples,
        verbosity=verbosity,
    )

    if lm.rank == 0:
        if isinstance(model, str):
            model_name = model
        elif hasattr(model, "config") and hasattr(model.config, "_name_or_path"):
            model_name = model.config._name_or_path
        else:
            model_name = type(model).__name__

        # add info about the model and few shot config
        results["config"] = {
            "model": model_name,
            "model_args": model_args,
            "batch_size": batch_size,
            "batch_sizes": (
                list(lm.batch_sizes.values()) if hasattr(lm, "batch_sizes") else []
            ),
            "device": device,
            "use_cache": use_cache,
            "limit": limit,
            "bootstrap_iters": bootstrap_iters,
            "gen_kwargs": gen_kwargs,
        }
        results["git_hash"] = get_git_commit_hash()
        add_env_info(results)  # additional environment info to results
        return results
    else:
        return None


@positional_deprecated
def evaluate(
    lm: "LM",
    task_dict,
    limit: Optional[int] = None,
    cache_requests: bool = False,
    rewrite_requests_cache: bool = False,
    bootstrap_iters: Optional[int] = 100000,
    write_out: bool = False,
    log_samples: bool = True,
    verbosity: str = "INFO",
):
    """Instantiate and evaluate a model on a list of tasks.

    :param lm: obj
        Language Model
    :param task_dict: dict[str, Task]
        Dictionary of tasks. Tasks will be taken to have name type(task).config.task.
    :param limit: int, optional
        Limit the number of examples per task (only use this for testing)
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param write_out: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :return
        Dictionary of results
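
    Example (a minimal sketch; assumes `lm` is an already-instantiated
    lm_eval.api.model.LM subclass and that the "hellaswag" task is available):

        task_dict = get_task_dict(["hellaswag"], TaskManager())
        results = evaluate(lm=lm, task_dict=task_dict, limit=10)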
    """

    eval_logger.setLevel(getattr(logging, f"{verbosity}"))

    # tracks all Instances/requests a model must generate output on.
    requests = defaultdict(list)
    # stores the amount to pad out reqs per req. type so that
    # number of fwd passes per distributed rank is equal
    padding_requests = defaultdict(int)

    # get lists of group hierarchy and each type of request
    task_hierarchy, eval_tasks = get_task_list(task_dict)
    if not log_samples:
        if any(
            "bypass" in getattr(task_output.task, "_metric_fn_list", {})
            for task_output in eval_tasks
        ):
            raise ValueError("log_samples must be True for 'bypass' metric-only tasks")
    for task_output in eval_tasks:
        task: Task = task_output.task
        limit = get_sample_size(task, limit)
        task.build_all_requests(
            limit=limit,
            rank=lm.rank,
            world_size=lm.world_size,
            cache_requests=cache_requests,
            rewrite_requests_cache=rewrite_requests_cache,
        )
        eval_logger.debug(
            f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}"
        )

        if write_out:
            print_writeout(task)
        # aggregate Instances by LM method requested to get output.
        for instance in task.instances:
            reqtype = instance.request_type
            requests[reqtype].append(instance)

        if lm.world_size > 1:
            instances_rnk = torch.tensor(len(task._instances), device=lm.device)
            gathered_item = (
                lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
            )
            # "multiple_choice" task types dispatch (several) "loglikelihood" request types
            reqtype = (
                "loglikelihood"
                if task.OUTPUT_TYPE == "multiple_choice"
                else task.OUTPUT_TYPE
            )
            # compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks)
            numpad = max(gathered_item) - gathered_item[lm.rank]
            # todo: may not account for padding in cases like SquadV2 which has multiple req types
            padding_requests[reqtype] += numpad
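            # e.g. with world_size=2 and per-rank instance counts [10, 8],
            # rank 1 gets numpad=2 so both ranks run the same number of batches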

    ### Run LM on inputs, get all outputs ###
    # execute each type of request
    for reqtype, reqs in requests.items():
        eval_logger.info(f"Running {reqtype} requests")
        # create `K` copies of each request `req` based off `K = req.repeats`
        cloned_reqs = []
        for req in reqs:
            cloned_reqs.extend([req] * req.repeats)
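        # e.g. a request with repeats=2 appears twice in cloned_reqs; its two
        # responses are re-attached to the same Instance's resps list below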

        if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
            for _ in range(padding_requests[reqtype]):
                cloned_reqs.extend([req] * req.repeats)

        # run requests through model
        resps = getattr(lm, reqtype)(cloned_reqs)

        # put responses from model into a list of length K for each request.
        for x, req in zip(resps, cloned_reqs):
            req.resps.append(x)

        if lm.world_size > 1:
            lm.accelerator.wait_for_everyone()

    RANK = lm.rank
    WORLD_SIZE = lm.world_size
    ### Postprocess outputs ###
    # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
    for task_output in eval_tasks:
        task = task_output.task
        task.apply_filters()

        ### Collect values of metrics on all datapoints ###
        # unpack results, sort back in order, and return control to Task
        # TODO: make it possible to use a different metric per filter
        # Pre-process task.instances to group by doc_id
        instances_by_doc_id = defaultdict(list)
        for instance in task.instances:
            instances_by_doc_id[instance.doc_id].append(instance)
        # Sort instances within each group
        for instances in instances_by_doc_id.values():
            instances.sort(key=lambda x: x.idx)
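        # e.g. a multiple_choice doc emits one loglikelihood instance per answer
        # choice, all sharing one doc_id; sorting by idx restores choice order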
        # iterate over different filters used
        for filter_key in task.instances[0].filtered_resps.keys():
            doc_iterator = task.doc_iterator(
                rank=RANK, limit=limit, world_size=WORLD_SIZE
            )
            for doc_id, doc in doc_iterator:
                requests = instances_by_doc_id[doc_id]
                metrics = task.process_results(
                    doc, [req.filtered_resps[filter_key] for req in requests]
                )
                if log_samples:
                    target = task.doc_to_target(doc)
                    example = {
                        "doc_id": doc_id,
                        "doc": doc,
                        "target": target,
                        "arguments": [req.args for req in requests],
                        "resps": [req.resps for req in requests],
                        "filtered_resps": [
                            req.filtered_resps[filter_key] for req in requests
                        ],
                    }
                    example.update(metrics)
                    task_output.logged_samples.append(example)
                for metric, value in metrics.items():
                    task_output.sample_metrics[(metric, filter_key)].append(value)

    if WORLD_SIZE > 1:
        # if multigpu, then gather data across all ranks to rank 0
        # first gather logged samples across all ranks
        for task_output in eval_tasks:
            if log_samples:
                # for task_name, task_samples in list(samples.items()):
                full_samples = [None] * WORLD_SIZE if RANK == 0 else None
                torch.distributed.gather_object(
                    obj=task_output.logged_samples,
                    object_gather_list=full_samples,
                    dst=0,
                )

                if RANK == 0:
                    task_output.logged_samples = list(
                        itertools.chain.from_iterable(full_samples)
                    )

            # then collect metrics across all ranks
            for metrics in task_output.sample_metrics:
                metric_list = [None] * WORLD_SIZE if RANK == 0 else None
                torch.distributed.gather_object(
                    obj=task_output.sample_metrics[metrics],
                    object_gather_list=metric_list,
                    dst=0,
                )
                if RANK == 0:
                    task_output.sample_metrics[metrics] = list(
                        itertools.chain.from_iterable(metric_list)
                    )

    if RANK == 0:
        ### Aggregate results over all datapoints ###
        # aggregate results ; run bootstrap CIs
        for task_output in eval_tasks:
            task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters)
        results, samples, configs, versions, num_fewshot = consolidate_results(
            eval_tasks
        )

        ### Calculate group metrics ###
        if bool(results):
            for group, task_list in reversed(task_hierarchy.items()):
                if len(task_list) == 0:
                    # task_hierarchy entries are either
                    # `group_name: [subtask1, subtask2, ...]`
                    # or `task_name: []`.
                    # we only want to operate on groups here.
                    continue
                metric_list = list(
                    {
                        key
                        for task in task_list
                        for key in results[task].keys()
                        if "_stderr" not in key and key not in ["alias", "samples"]
                    }
                )
                for metric in metric_list:
                    stderr = "_stderr,".join(metric.split(","))

                    # gather metrics, sizes, and stderrs from subtasks
                    metrics = [
                        results[task][metric]
                        for task in task_list
                        if metric in results[task]
                    ]  # TODO: copy?
                    stderrs = [
                        results[task][stderr]
                        for task in task_list
                        if stderr in results[task]
                    ]
                    sizes = [
                        results[task]["samples"]
                        for task in task_list
                        if metric in results[task]
                    ]

                    # compute group's pooled metric and stderr
                    results[group][
                        metric
                    ] = lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes)
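                    # aggregate_subtask_metrics pools subtask scores (by default,
                    # a size-weighted mean; see lm_eval.api.metrics)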
                    # TODO: calculate grouped metric using aggregation fn
                    if "N/A" in stderrs:
                        results[group][stderr] = "N/A"
                    else:
                        results[group][
                            stderr
                        ] = lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes)
                        # TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
                        # To use the old (likely incorrect) variance formula, comment out the above and uncomment this line:
                        # results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics)

                    results[group]["samples"] = sum(sizes)

        results_agg = defaultdict(dict)
        groups_agg = defaultdict(dict)
        all_tasks_list = list(task_hierarchy.keys())
        while True:
            add_tasks_list = list(k for k in results_agg.keys())
            left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list)))
            if len(left_tasks_list) == 0:
                break

            _task_hierarchy = {
                k: v for k, v in task_hierarchy.items() if k in left_tasks_list
            }
            _results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results)

            results_agg = {**results_agg, **_results_agg}
            groups_agg = {**groups_agg, **_groups_agg}

        for group_name, task_list in task_hierarchy.items():
            if task_list:
                num_fewshot[group_name] = num_fewshot[
                    task_list[0]
                ]  # TODO: validate this

        results_dict = {
            "results": dict(results_agg.items()),
            **({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
            "group_subtasks": dict(reversed(task_hierarchy.items())),
            "configs": dict(sorted(configs.items())),
            "versions": dict(sorted(versions.items())),
            "n-shot": dict(sorted(num_fewshot.items())),
        }
        if log_samples:
            results_dict["samples"] = dict(samples)

        return results_dict

    else:
        return None


def request_caching_arg_to_dict(cache_requests: str) -> dict:
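    """Map a `cache_requests` CLI-style string onto the three boolean flags.

    Illustrative mapping, derived from the membership tests below:
        "true"    -> cache requests, keep any existing cache
        "refresh" -> cache requests and rewrite the existing cache
        "delete"  -> delete the existing requests cache
    """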
    request_caching_args = {
        "cache_requests": cache_requests in {"true", "refresh"},
        "rewrite_requests_cache": cache_requests == "refresh",
        "delete_requests_cache": cache_requests == "delete",
    }

    return request_caching_args