import random
import itertools
import collections

import numpy as np

import lm_eval.api
import lm_eval.api.metrics
import lm_eval.api.model
import lm_eval.tasks
import lm_eval.models

from lm_eval.utils import positional_deprecated, run_task_tests, make_table, get_git_commit_hash

from lm_eval.logger import eval_logger


@positional_deprecated
def simple_evaluate(
    model,
    model_args=None,
    tasks=[],
    num_fewshot=0,
    batch_size=None,
    device=None,
    no_cache=False,
    limit=None,
    bootstrap_iters=100000,
    check_integrity=False,
    decontamination_ngrams_path=None,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param model: Union[str, LM]
        Name of model or LM object, see lm_eval.api.model.get_model
    :param model_args: Optional[str]
        String arguments for each model class, see LM.create_from_arg_string.
        Ignored if `model` argument is an LM object.
    :param tasks: list[Union[str, Task]]
        List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in few-shot context
    :param batch_size: int, optional
        Batch size for model
    :param device: str, optional
        PyTorch device (e.g. "cpu" or "cuda:0") for running models
    :param no_cache: bool
        If True, do not cache model responses.
    :param limit: int, optional
        Limit the number of examples per task (only use this for testing)
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param check_integrity: bool
        Whether to run the relevant part of the test suite for the tasks
    :return
        Dictionary of results
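
    Example (a minimal sketch; the model and task names below are placeholders
    and must correspond to entries registered with lm_eval):

        results = simple_evaluate(
            model="gpt2",
            tasks=["lambada_openai"],
            num_fewshot=0,
            batch_size=8,
        )
        print(results["results"])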
    """
    random.seed(1234)
    np.random.seed(1234)

    assert tasks != [], "No tasks specified"

    if isinstance(model, str):
        if model_args is None:
            model_args = ""
        lm = lm_eval.api.model.get_model(model).create_from_arg_string(
            model_args, {"batch_size": batch_size, "device": device}
        )
    else:
        assert isinstance(model, lm_eval.api.model.LM)
        lm = model

    task_dict = lm_eval.tasks.get_task_dict(tasks, num_fewshot=num_fewshot)

    if check_integrity:
        run_task_tests(task_list=tasks)

    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        num_fewshot=num_fewshot,
        limit=limit,
        bootstrap_iters=bootstrap_iters,
        decontamination_ngrams_path=decontamination_ngrams_path,
    )

    # add info about the model and few shot config
    results["config"] = {
        "model": model,
        "model_args": model_args,
        "num_fewshot": num_fewshot,
        "batch_size": batch_size,
        "device": device,
        "no_cache": no_cache,
        "limit": limit,
        "bootstrap_iters": bootstrap_iters,
    }
    results["git_hash"] = get_git_commit_hash()

    return results


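# suffix appended to a metric's name to mark its decontaminated variant in the
# results (currently only defined here; nothing below references it yet)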
decontaminate_suffix = "_decontaminate"


@positional_deprecated
def evaluate(
    lm,
    task_dict,
    num_fewshot=0,
    limit=None,
    bootstrap_iters=100000,
    decontamination_ngrams_path=None,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param lm: obj
        Language Model
    :param task_dict: dict[str, Task]
        Dictionary of tasks. Tasks will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in few-shot context
    :param limit: int, optional
        Limit the number of examples per task (only use this for testing)
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :return
        Dictionary of results
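
    The returned dictionary has roughly the following shape (illustrative only;
    actual task, metric, and filter-key names depend on the tasks being run):

        {
            "results": {
                "some_task": {
                    "acc - filter=none": 0.5,
                    "acc - filter=none_stderr": 0.01,
                },
            },
            "versions": {"some_task": 0},
        }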
    """

    decontaminate = decontamination_ngrams_path is not None

    results = collections.defaultdict(dict)
    versions = collections.defaultdict(dict)

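    # `requests` maps each request type (the LM method to call) to the list of
    # Instances that should be run with that method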
    requests = collections.defaultdict(list)
    requests_origin = collections.defaultdict(list)

    docs = {}

    # get lists of each type of request
    for task_name, task in task_dict.items():
        versions[task_name] = task.VERSION
    
        # deterministically shuffle docs and chop off the first `limit` because sometimes docs are in some kind of order
        # task_docs = list(task_doc_func())
        # rnd = random.Random()
        # rnd.seed(42)
        # rnd.shuffle(task_docs)

        # for doc_id, doc in enumerate(itertools.islice(task_docs, 0, limit)):
        task.build_all_requests(limit=limit)
        # aggregate Instances by LM method requested to get output.
        # TODO: this is hacky, fix in task.py
        reqtype = (
            "loglikelihood"
            if task.OUTPUT_TYPE == "multiple_choice"
            else task.OUTPUT_TYPE
        )
        requests[reqtype].extend(task.instances)
    
    ### Run LM on inputs, get all outputs ###
    # execute each type of request
    for reqtype, reqs in requests.items():
        eval_logger.info("Running {} requests".format(reqtype))
        # create `K` copies of each request `req` based off `K = req.repeats`
        cloned_reqs = []
        for req in reqs:
            cloned_reqs.extend([req] * req.repeats)
        
        # run requests through model
        resps = getattr(lm, reqtype)(cloned_reqs)

        # put responses from model into a list of length K for each request.
        for x, req in zip(resps, cloned_reqs):
            req.resps.append(x)

    ### Postprocess outputs ###
    # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
    for task_name, task in task_dict.items():
        task.apply_filters()


    ### Collect values of metrics on all datapoints ###
    # TODO: make metric configurable, add metric registry 
    vals = collections.defaultdict(list)

    # unpack results and sort back in order and return control to Task
    for task_name, task in task_dict.items():
        # calculate values for each filter setup (TODO: make getting list of keys cleaner)
        # TODO: make it possible to use a different metric per key
        for key in task.instances[0].filtered_resps.keys():
            for doc_id, doc in enumerate(
                itertools.islice(
                    task.test_docs() if task.has_test_docs() else task.validation_docs(),
                    0,
                    limit,
                )
            ):
                # subset instances to only this document id ; sort by idx
                doc_requests = [
                    instance for instance in task.instances if instance.doc_id == doc_id
                ]
                doc_requests.sort(key=lambda x: x.idx)
                metrics = task.process_results(
                    doc, [req.filtered_resps[key] for req in doc_requests]
                )
                for metric, value in metrics.items():
                    vals[(task_name, key, metric)].append(value)
    


    ### Aggregate results over all datapoints ###
    # aggregate results ; run bootstrap CIs
    for (task_name, key, metric), items in vals.items():
        task = task_dict[task_name]
        results[task_name][metric + " - filter=" + key] = task.aggregation()[metric](items)

        # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap,
        # so we run them for fewer iterations. still looking for a cleaner way to do this
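        # stderr_for_metric returns a bootstrap standard-error estimator for this
        # aggregation function, or None if no estimator is available (checked below)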
        stderr = lm_eval.api.metrics.stderr_for_metric(
            metric=task.aggregation()[metric],
            bootstrap_iters=min(bootstrap_iters, 1000)
            if metric in ["bleu", "chrf", "ter"]
            else bootstrap_iters,
        )

        if stderr is not None:
            results[task_name][metric + " - filter=" + key + "_stderr"] = stderr(items)

    return {"results": dict(results), "versions": dict(versions)}