# evaluator.py

import random
import itertools
import json
import collections
import logging
import sys

import torch
import numpy as np

import lm_eval.api
import lm_eval.api.metrics
import lm_eval.api.model  # used below for the LM base class and CachingLM
import lm_eval.api.registry
import lm_eval.models
import lm_eval.tasks

from lm_eval.utils import (
    positional_deprecated,
    run_task_tests,
    make_table,
    create_iterator,
    get_git_commit_hash,
)

from lm_eval.logger import eval_logger

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))

@positional_deprecated
def simple_evaluate(
    model,
    model_args=None,
    tasks=None,
    num_fewshot=None,
    batch_size=None,
    max_batch_size=None,
    device=None,
    use_cache=None,
    limit=None,
    bootstrap_iters: int = 100000,
    check_integrity: bool = False,
    decontamination_ngrams_path=None,
    write_out: bool = False,
    log_samples: bool = True,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param model: Union[str, LM]
        Name of model or LM object, see lm_eval.models.get_model
    :param model_args: Optional[str]
        String arguments for each model class, see LM.create_from_arg_string.
        Ignored if `model` argument is a LM object.
    :param tasks: list[Union[str, Task]]
        List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in the few-shot context
    :param batch_size: int or str, optional
        Batch size for the model
    :param max_batch_size: int, optional
        Maximum batch size to try with automatic batch size detection
    :param device: str, optional
        PyTorch device (e.g. "cpu" or "cuda:0") for running models
    :param use_cache: str, optional
        A path to a sqlite db file for caching model responses. `None` if not caching.
    :param limit: int or float, optional
        Limit the number of examples per task (only use this for testing). If <1, limit is interpreted as a fraction of the total number of examples.
    :param bootstrap_iters: int
        Number of iterations for bootstrap statistics
    :param check_integrity: bool
        Whether to run the relevant part of the test suite for the tasks
    :param write_out: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :return
        Dictionary of results
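
    Example (a minimal sketch; the model and task names below are illustrative
    assumptions and must exist in your installed registry):
        >>> results = simple_evaluate(
        ...     model="hf",
        ...     model_args="pretrained=gpt2",
        ...     tasks=["lambada_openai"],
        ... )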
    """
    random.seed(0)
    np.random.seed(1234)
    torch.manual_seed(
        1234
    )  # TODO: this may affect training runs that are run with evaluation mid-run.

    if tasks is None:
        tasks = []
    assert (
        tasks != []
    ), "No tasks specified, or no tasks found. Please verify the task names."

    if isinstance(model, str):
        if model_args is None:
            model_args = ""
        lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
            model_args,
            {
                "batch_size": batch_size,
                "max_batch_size": max_batch_size,
                "device": device,
            },
        )
    else:
        assert isinstance(model, lm_eval.api.model.LM)
        lm = model

    if use_cache is not None:
        eval_logger.info(f"Using cache at {use_cache}_rank{lm.rank}.db")
        lm = lm_eval.api.model.CachingLM(
            lm,
            # each rank receives a different cache db,
            # necessary to avoid multiple writes to the cache at once.
            use_cache + "_rank" + str(lm.rank) + ".db",
        )

    task_dict = lm_eval.tasks.get_task_dict(tasks)
    for task_name in task_dict.keys():
        task_obj = task_dict[task_name]
        if isinstance(task_obj, tuple):
            group, task_obj = task_obj
            if task_obj is None:
                continue

        config = task_obj._config
        if num_fewshot is not None:
            if config["num_fewshot"] > 0:
                default_num_fewshot = config["num_fewshot"]
                eval_logger.warning(
                    f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}"
                )

            task_obj._config["num_fewshot"] = num_fewshot

    if check_integrity:
        run_task_tests(task_list=tasks)

    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
        bootstrap_iters=bootstrap_iters,
        decontamination_ngrams_path=decontamination_ngrams_path,
        write_out=write_out,
        log_samples=log_samples,
    )

    if lm.rank == 0:
        # add info about the model and few shot config
        results["config"] = {
            "model": model
            if isinstance(model, str)
            else model.model.config._name_or_path,
            "model_args": model_args,
            "batch_size": batch_size,
            "batch_sizes": list(lm.batch_sizes.values())
            if hasattr(lm, "batch_sizes")
            else [],
            "device": device,
            "use_cache": use_cache,
            "limit": limit,
            "bootstrap_iters": bootstrap_iters,
        }
        results["git_hash"] = get_git_commit_hash()
        return results
    else:
        return None

decontaminate_suffix = "_decontaminate"


@positional_deprecated
def evaluate(
    lm,
    task_dict,
    limit=None,
    bootstrap_iters: int = 100000,
    decontamination_ngrams_path=None,
    write_out: bool = False,
    log_samples: bool = True,
):
    """Evaluate an instantiated model on a dictionary of tasks.

    :param lm: obj
        Language model
    :param task_dict: dict[str, Task]
        Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
    :param limit: int, optional
        Limit the number of examples per task (only use this for testing)
    :param bootstrap_iters: int
        Number of iterations for bootstrap statistics
    :param write_out: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :return
        Dictionary of results
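
    Example (a minimal sketch; assumes `my_lm` is an already-constructed
    `lm_eval.api.model.LM` instance and that the task name is available):
        >>> task_dict = lm_eval.tasks.get_task_dict(["lambada_openai"])
        >>> results = evaluate(lm=my_lm, task_dict=task_dict)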
    """

    # decontaminate = decontamination_ngrams_path is not None

    # stores the final result for each task, for each metric/filter pair.
    results = collections.defaultdict(dict)
    # tracks each task's version.
    versions = collections.defaultdict(dict)
    # tracks the YAML configs of all chosen tasks.
    configs = collections.defaultdict(dict)
    # logs info about each document evaluated.
    samples = collections.defaultdict(list)
    # tracks all Instances/requests a model must generate output on.
    requests = collections.defaultdict(list)
    # aggregated task scores presented with groups
    results_agg = collections.defaultdict(dict)
    # aggregated group scores only
    groups_agg = collections.defaultdict(dict)
    # stores the amount to pad out reqs per req. type so that
    # the number of fwd passes per distributed rank is equal
    padding_requests = collections.defaultdict(int)
    # store the hierarchy to do proper ordering
    task_hierarchy = collections.defaultdict(list)
    # store the ordering (depth) of tasks and groups
    task_order = collections.defaultdict(int)
    # store the aggregation function for aggregating across tasks in the same group
    sample_agg_fn = collections.defaultdict(dict)

    # get lists of each type of request
    for task_name, task in task_dict.items():
        if isinstance(task, tuple):
            group_name, task = task
            task_hierarchy[group_name].append(task_name)
        else:
            task_hierarchy[task_name] = []

        if task is None:
            continue

        versions[task_name] = task.VERSION
        configs[task_name] = dict(task.dump_config())

Hailey Schoelkopf's avatar
Hailey Schoelkopf committed
241
        if limit is not None:
242
243
244
245
246
247
            if task.has_test_docs():
                task_docs = task.test_docs()
            elif task.has_validation_docs():
                task_docs = task.validation_docs()
            else:
                raise RuntimeError("Task has neither test_docs nor validation_docs")
248
            limit = int(len(task_docs) * limit) if limit < 1.0 else int(limit)
249

250
251
        task.build_all_requests(limit=limit, rank=lm.rank, world_size=lm.world_size)

        eval_logger.info(
            f"Task: {task_name}; number of requests on this rank: {len(task.instances)}"
        )

        if write_out:
            for inst in task.instances:
                # print the prompt for the first few documents
                if inst.doc_id < 1:
                    eval_logger.info(
                        f"Task: {task_name}; document {inst.doc_id}; context prompt (starting on next line):\
\n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)"
                    )
                    eval_logger.info(f"Request: {str(inst)}")

        # aggregate Instances by LM method requested to get output.
        reqtype = (
            "loglikelihood"
            if task.OUTPUT_TYPE == "multiple_choice"
            else task.OUTPUT_TYPE
        )  # TODO: this is hacky, fix in task.py
        requests[reqtype].extend(task.instances)

        if lm.world_size > 1:
            instances_rnk = torch.tensor(len(task._instances), device=lm.device)
            gathered_item = (
                lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
            )

            # compute number of pseudobatches to pad with (FSDP/DDP require even batches among ranks)
            numpad = max(gathered_item) - gathered_item[lm.rank]
            padding_requests[task.OUTPUT_TYPE] += numpad

    ### Run LM on inputs, get all outputs ###
    # execute each type of request
    for reqtype, reqs in requests.items():
        eval_logger.info("Running {} requests".format(reqtype))
        # create `K` copies of each request `req` based off `K = req.repeats`
        cloned_reqs = []
        for req in reqs:
            cloned_reqs.extend([req] * req.repeats)

        # pad out with copies of the final request so every rank performs the
        # same number of forward passes (required by FSDP/DDP).
        if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
            for _ in range(padding_requests[reqtype]):
                cloned_reqs.extend([req] * req.repeats)

        # run requests through model
        resps = getattr(lm, reqtype)(cloned_reqs)

        # put responses from model into a list of length K for each request.
        for x, req in zip(resps, cloned_reqs):
            req.resps.append(x)

        if lm.world_size > 1:
            lm.accelerator.wait_for_everyone()

    ### Postprocess outputs ###
    # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
    for task_name, task in task_dict.items():
        if isinstance(task, tuple):
            group, task = task
            if task is None:
                continue
        task.apply_filters()

    ### Collect values of metrics on all datapoints ###
    vals = collections.defaultdict(list)

    # unpack results and sort back in order and return control to Task
    for task_name, task in task_dict.items():
        if isinstance(task, tuple):
            group, task = task
            if task is None:
                continue

        # TODO: make it possible to use a different metric per filter
        # iterate over different filters used
        for key in task.instances[0].filtered_resps.keys():
            doc_iterator = (
                itertools.islice(
                    enumerate(task.test_docs()), lm.rank, limit, lm.world_size
                )
                if task.has_test_docs()
                else itertools.islice(
                    enumerate(task.validation_docs()), lm.rank, limit, lm.world_size
                )
            )
            for doc_id, doc in doc_iterator:
                # subset instances to only this document id; sort by idx
                doc_reqs = list(filter(lambda x: x.doc_id == doc_id, task.instances))
                doc_reqs.sort(key=lambda x: x.idx)
                metrics = task.process_results(
                    doc, [req.filtered_resps[key] for req in doc_reqs]
                )
                if log_samples:
                    target = task.doc_to_target(doc)
                    example = {
                        "doc_id": doc_id,
                        "doc": doc,
                        "target": target,
                        "arguments": [req.args for req in doc_reqs],
                        "resps": [req.resps for req in doc_reqs],
                        "filtered_resps": [
                            req.filtered_resps[key] for req in doc_reqs
                        ],
                    }
                    example.update(metrics)
                    samples[task_name].append(example)
                for metric, value in metrics.items():
                    vals[(task_name, key, metric)].append(value)

    if lm.world_size > 1:
        # if multigpu, then gather data across all ranks

        # first gather logged samples across all ranks
        for task_name, task_samples in list(samples.items()):
            full_samples = [None] * lm.world_size
            torch.distributed.all_gather_object(full_samples, task_samples)

            samples[task_name] = list(itertools.chain.from_iterable(full_samples))

        # then collect metrics across all ranks
        vals_torch = collections.defaultdict(list)
        for (task_name, key, metric), items in vals.items():
            numitem = 0
            if isinstance(items[0], tuple):
                numitem = len(items[0])

            if isinstance(items[0], (str, list)):
                # handle the string/list case: python objects cannot be
                # gathered as tensors, so use all_gather_object instead.
                gathered_items = [None] * lm.accelerator.num_processes
                torch.distributed.all_gather_object(gathered_items, items)

                gathered_item = list(itertools.chain.from_iterable(gathered_items))
            else:
                # distributed gather requires all ranks to have the same dimensions,
                # so we pad out with the float32 min value
                pad_value = torch.finfo(torch.float32).min
                metrics_tensor = torch.tensor(items, device=lm.device)

                original_dtype = metrics_tensor.dtype  # store original dtype
                torch_device_tensor = lm.accelerator.pad_across_processes(
                    metrics_tensor.to(torch.float32), pad_index=pad_value
                )
                gathered_item = lm.accelerator.gather(torch_device_tensor)

                # strip the padding entries back out after gathering
                if numitem > 0:
                    gathered_filtered = gathered_item[gathered_item[:, 0] != pad_value]
                else:
                    gathered_filtered = gathered_item[gathered_item != pad_value]

                gathered_item = (
                    gathered_filtered.to(original_dtype).cpu().detach().numpy().tolist()
                )
                # reconvert if we were passed a tuple of values
                if numitem > 0:
                    gathered_item = [tuple(g) for g in gathered_item]

            if lm.rank == 0:
                vals_torch[(task_name, key, metric)] = gathered_item

        vals = vals_torch

    if lm.rank == 0:

        ### Get task ordering for correct sample-wide aggregation
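        # task_order[name] holds the depth of each task/group in the group
        # hierarchy (0 for top-level groups); it is later used to indent
        # ("-" * depth) names in the results table. group_to_task flattens
        # nested groups so each group maps to the leaf tasks beneath it.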
        group_to_task = {}
        for group in task_hierarchy.keys():
            if group not in task_order:
                task_order[group] = 0

            if len(task_hierarchy[group]) > 0:
                group_to_task[group] = task_hierarchy[group].copy()

            for task in task_hierarchy[group]:
                if task in task_order:
                    task_order[task] += 1
                else:
                    task_order[task] = 1 + task_order[group]

                if task in task_hierarchy:
                    group_to_task[group].remove(task)
                    group_to_task[group].extend(task_hierarchy[task])

        task_to_group = {}
        for group in group_to_task:
            for task in group_to_task[group]:
                if task in task_to_group:
                    task_to_group[task].append(group)
                else:
                    task_to_group[task] = [group]
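
        # e.g. with task_hierarchy == {"groupA": ["task1", "task2"]}, this
        # yields task_to_group == {"task1": ["groupA"], "task2": ["groupA"]},
        # mapping each leaf task to every group whose score it contributes to.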

        ### Aggregate results over all datapoints ###
        # aggregate results; run bootstrap CIs
        for (task_name, key, metric), items in vals.items():
            task = task_dict[task_name]
            metric_key = metric + "," + key

            if isinstance(task, tuple):
                group_name, task = task
            else:
                group_name = None

            agg_fn = task.aggregation()[metric]
            task_score = agg_fn(items)

            if group_name is not None:
                sample_metric_key = metric + "(sample agg)," + key
                for grouping in task_to_group[task_name]:
                    # collect per-task scores for mean-over-tasks aggregation ...
                    if metric_key in results[grouping]:
                        results[grouping][metric_key].append(task_score)
                    else:
                        results[grouping][metric_key] = [task_score]

                    # ... and raw per-sample values for sample-level aggregation
                    if sample_metric_key in results[grouping]:
                        results[grouping][sample_metric_key] += items
                    else:
                        results[grouping][sample_metric_key] = items.copy()
                        sample_agg_fn[grouping][sample_metric_key] = agg_fn

            results[task_name][metric_key] = task_score

            # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap,
            # so we run them for fewer iterations. still looking for a cleaner way to do this
            if bootstrap_iters > 0:
                stderr = lm_eval.api.metrics.stderr_for_metric(
                    metric=task.aggregation()[metric],
                    bootstrap_iters=min(bootstrap_iters, 100)
                    if metric in ["bleu", "chrf", "ter"]
                    else bootstrap_iters,
                )

                if stderr is not None:
                    results[task_name][metric + "_stderr" + "," + key] = stderr(items)

        if bool(results):
            for task_or_group in results.keys():
                for metric in results[task_or_group].keys():
                    if isinstance(results[task_or_group][metric], list):
                        if "(sample agg)" in metric:
                            results[task_or_group][metric] = sample_agg_fn[
                                task_or_group
                            ][metric](results[task_or_group][metric])
                        else:
                            results[task_or_group][metric] = np.average(
                                results[task_or_group][metric]
                            )
                        versions[task_or_group] = "N/A"

        for task_name, task in task_dict.items():
            if isinstance(task, tuple):
                group_name, task = task
                order = task_order[group_name]
                tabbed_name = "-" * order + group_name
                results_agg[tabbed_name] = results[group_name]
                versions[tabbed_name] = versions[group_name]
                if order == 0:
                    groups_agg[group_name] = results[group_name]

            order = task_order[task_name]
            tabbed_name = "-" * order + task_name
            results_agg[tabbed_name] = results[task_name]
            versions[tabbed_name] = versions[task_name]

        results_dict = {
            "results": dict(results_agg.items()),
            **({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
            "configs": dict(sorted(configs.items())),
            "versions": dict(sorted(versions.items())),
        }
        if log_samples:
            results_dict["samples"] = dict(samples)

        return results_dict

    else:
        return None