import random
import itertools
import collections

import torch

import numpy as np

import lm_eval.api
import lm_eval.tasks
import lm_eval.models
import lm_eval.api.metrics
import lm_eval.api.model  # explicit import for the lm_eval.api.model.LM check below
import lm_eval.api.registry

from lm_eval.utils import (
    positional_deprecated,
    run_task_tests,
    make_table,
    create_iterator,
    get_git_commit_hash,
)

from lm_eval.logger import eval_logger


@positional_deprecated
def simple_evaluate(
    model,
    model_args=None,
    tasks=[],
    num_fewshot=0,
    batch_size=None,
    max_batch_size=None,
    device=None,
    no_cache=False,
    limit=None,
    bootstrap_iters=100000,
    check_integrity=False,
    decontamination_ngrams_path=None,
    write_out=False,
    output_base_path=None,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param model: Union[str, LM]
        Name of model or LM object, see lm_eval.api.registry.get_model
    :param model_args: Optional[str]
        String arguments for each model class, see LM.create_from_arg_string.
        Ignored if `model` argument is a LM object.
    :param tasks: list[Union[str, Task]]
        List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in few-shot context
    :param batch_size: int or str, optional
        Batch size for model
    :param max_batch_size: int, optional
        Maximal batch size to try with automatic batch size detection
    :param device: str, optional
        PyTorch device (e.g. "cpu" or "cuda:0") for running models
    :param no_cache: bool
        Whether or not to cache
    :param limit: int or float, optional
        Limit the number of examples per task (only use this for testing). If < 1.0, limit is treated as a fraction of the total number of examples.
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param check_integrity: bool
        Whether to run the relevant part of the test suite for the tasks
    :param write_out: bool
        If True, write details about prompts and logits to json for all tasks
    :param output_base_path: str, optional
        Directory to which detailed eval info will be written. Defaults to present working dir.
    :return
        Dictionary of results
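    Example (an illustrative sketch; the model and task names below are
    assumptions and may not match those registered in your installation):

        results = simple_evaluate(
            model="hf",
            model_args="pretrained=EleutherAI/pythia-160m",
            tasks=["lambada_openai"],
            num_fewshot=0,
            batch_size=8,
        )
        print(make_table(results))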
    """
    random.seed(1234)
    np.random.seed(1234)

    assert tasks != [], "No tasks specified"

    if isinstance(model, str):
        if model_args is None:
            model_args = ""
        lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
            model_args,
            {
                "batch_size": batch_size,
                "max_batch_size": max_batch_size,
                "device": device,
            },
        )
    else:
        assert isinstance(model, lm_eval.api.model.LM)
        lm = model

    task_dict = lm_eval.tasks.get_task_dict(tasks, num_fewshot=num_fewshot)

    if check_integrity:
        run_task_tests(task_list=tasks)

    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
        bootstrap_iters=bootstrap_iters,
        decontamination_ngrams_path=decontamination_ngrams_path,
        write_out=write_out,
        output_base_path=output_base_path,
    )

    if lm.rank == 0:
        # add info about the model and few shot config
        results["config"] = {
            "model": model
            if isinstance(model, str)
            else model.model.config._name_or_path,
            "model_args": model_args,
            "num_fewshot": num_fewshot,
            "batch_size": batch_size,
            "batch_sizes": list(lm.batch_sizes.values())
            if hasattr(lm, "batch_sizes")
            else [],
            "device": device,
            "no_cache": no_cache,
            "limit": limit,
            "bootstrap_iters": bootstrap_iters,
        }
        results["git_hash"] = get_git_commit_hash()
        return results
    else:
        return None


decontaminate_suffix = "_decontaminate"
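# NOTE: decontamination is not wired up in this version of evaluator.py; the
# `decontamination_ngrams_path` argument is accepted and passed through, but the
# corresponding check in `evaluate()` below is commented out and this suffix is unused here.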
Leo Gao's avatar
Leo Gao committed
125


@positional_deprecated
def evaluate(
    lm,
    task_dict,
    limit=None,
    bootstrap_iters=100000,
    decontamination_ngrams_path=None,
    write_out=False,
    output_base_path=None,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param lm: obj
        Language Model
    :param task_dict: dict[str, Task]
        Dictionary of tasks. Tasks will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param limit: int or float, optional
        Limit the number of examples per task (only use this for testing). If < 1.0, limit is treated as a fraction of the total number of examples.
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param write_out: bool
        If True, write all prompts, logits and metrics to json for offline analysis
    :param output_base_path: str, optional
        Directory to which detailed eval info will be written. Defaults to present working dir
    :return
        Dictionary of results
    """

    # decontaminate = decontamination_ngrams_path is not None

    results = collections.defaultdict(dict)
    versions = collections.defaultdict(dict)
    configs = collections.defaultdict(dict)

    requests = collections.defaultdict(list)

    # docs = {}

    # get lists of each type of request
    for task_name, task in task_dict.items():
        versions[task_name] = task.VERSION
        configs[task_name] = dict(
            task.dump_config()
        )  # TODO: don't access a private attribute here ; for non-YAML tasks handle this case

        # deterministically shuffle docs and chop off the first `limit` because sometimes docs are in some kind of order
        # task_docs = list(task_doc_func())
        # rnd = random.Random()
        # rnd.seed(42)
        # rnd.shuffle(task_docs)
        if limit is not None:
            if task.has_test_docs():
                task_docs = task.test_docs()
            elif task.has_validation_docs():
                task_docs = task.validation_docs()
            else:
                raise RuntimeError("Task has neither test_docs nor validation_docs")
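            # a fractional `limit` (< 1.0) is interpreted as a proportion of the available docs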
            limit = int(len(task_docs) * limit) if limit < 1.0 else int(limit)

        task.build_all_requests(limit=limit, rank=lm.rank, world_size=lm.world_size)

        # aggregate Instances by LM method requested to get output.
        reqtype = (
            "loglikelihood"
            if task.OUTPUT_TYPE == "multiple_choice"
            else task.OUTPUT_TYPE
        )  # TODO: this is hacky, fix in task.py
        requests[reqtype].extend(task.instances)

        if lm.world_size > 1:
            instances_rnk = torch.tensor(len(task._instances), device=lm.device)
            gathered_item = (
                lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
            )

            # compute number of pseudobatches to pad with (FSDP/DDP require even batches among ranks)
            numpad = max(gathered_item) - gathered_item[lm.rank]

    ### Run LM on inputs, get all outputs ###
    # execute each type of request
    for reqtype, reqs in requests.items():
        eval_logger.info("Running {} requests".format(reqtype))
        # create `K` copies of each request `req` based off `K = req.repeats`
        cloned_reqs = []
        for req in reqs:
            cloned_reqs.extend([req] * req.repeats)

        if (lm.world_size > 1) and (numpad > 0):
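            # NOTE: this reuses the final `req` from the loop above as filler so that
            # every rank submits the same number of requests (the distributed
            # collectives used later require matching counts across ranks)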
            for _ in range(numpad):
                cloned_reqs.extend([req] * req.repeats)

        # run requests through model
        resps = getattr(lm, reqtype)(cloned_reqs)

        # put responses from model into a list of length K for each request.
        for x, req in zip(resps, cloned_reqs):
            req.resps.append(x)

    if lm.world_size > 1:
        lm.accelerator.wait_for_everyone()

    ### Postprocess outputs ###
    # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
    for task_name, task in task_dict.items():
        task.apply_filters()

    ### Collect values of metrics on all datapoints ###
    # TODO: make metric configurable, add metric registry
    vals = collections.defaultdict(list)

    # unpack results and sort back in order and return control to Task
    for task_name, task in task_dict.items():
        # calculate values for each filter setup (TODO: make getting list of keys cleaner)
        # TODO: make it possible to use a different metric per key
        for key in task.instances[0].filtered_resps.keys():
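            # iterate over this rank's shard of the documents: islice(..., lm.rank, limit, lm.world_size)
            # walks every `world_size`-th doc starting at this rank's offset, matching the
            # rank/world_size sharding passed to build_all_requests() above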
            doc_iterator = (
                itertools.islice(
                    enumerate(task.test_docs()), lm.rank, limit, lm.world_size
                )
                if task.has_test_docs()
                else itertools.islice(
                    enumerate(task.validation_docs()), lm.rank, limit, lm.world_size
                )
            )
            for doc_id, doc in doc_iterator:
                # subset instances to only this document id ; sort by idx
                requests = list(filter(lambda x: x.doc_id == doc_id, task.instances))
                requests.sort(key=lambda x: x.idx)
                metrics = task.process_results(
                    doc, [req.filtered_resps[key] for req in requests]
                )
                for metric, value in metrics.items():
                    vals[(task_name, key, metric)].append(value)

    if lm.world_size > 1:
        # if multigpu, then gather data across all ranks
        vals_torch = collections.defaultdict(list)
        for (task_name, key, metric), items in vals.items():

            numitem = 0
            if type(items[0]) == tuple:
                numitem = len(items[0])

            # distributed gather requires all ranks to have same dimensions
            # so we pad out with float32 min value
            pad_value = torch.finfo(torch.float32).min
            metrics_tensor = torch.tensor(items, device=lm.device)

            original_dtype = metrics_tensor.dtype  # store original dtype
            torch_device_tensor = lm.accelerator.pad_across_processes(
                metrics_tensor.to(torch.float32), pad_index=pad_value
            )
            gathered_item = lm.accelerator.gather(torch_device_tensor)

            if numitem > 0:
                gathered_filtered = gathered_item[gathered_item[:, 0] != pad_value]
            else:
                gathered_filtered = gathered_item[gathered_item != pad_value]

            gathered_item = (
                gathered_filtered.to(original_dtype).cpu().detach().numpy().tolist()
            )
            # reconvert if we were passed a tuple of values
            if numitem > 0:
                gathered_item = [tuple(g) for g in gathered_item]

            if lm.rank == 0:
                vals_torch[(task_name, key, metric)] = gathered_item

        vals = vals_torch

    if lm.rank == 0:
        ### Aggregate results over all datapoints ###
        # aggregate results ; run bootstrap CIs
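        # each result is keyed as "<metric>,<filter_key>" so that values produced under
        # different filter pipelines stay distinguishable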
        for (task_name, key, metric), items in vals.items():
            task = task_dict[task_name]
            results[task_name][metric + "," + key] = task.aggregation()[metric](items)

            # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap
            # so we run them for fewer iterations. still looking for a cleaner way to do this

            stderr = lm_eval.api.metrics.stderr_for_metric(
                metric=task.aggregation()[metric],
                bootstrap_iters=min(bootstrap_iters, 1000)
                if metric in ["bleu", "chrf", "ter"]
                else bootstrap_iters,
            )

            if stderr is not None:
                results[task_name][metric + "_stderr" + "," + key] = stderr(items)

        return {
            "results": dict(results),
            "configs": dict(configs),
            "versions": dict(versions),
        }

    else:
        return None