test_evaluator.py
# import lm_eval.base as base
from typing import List

import pytest

# import lm_eval.models as models
import lm_eval.api as api
import lm_eval.evaluator as evaluator
from lm_eval import tasks


# TODO: more fine grained unit tests rather than this big honking integration
# test once we break evaluator into smaller, more manageable pieces


@pytest.mark.parametrize(
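    # Single fast case: arc_easy only, limited to 10 documents per task, with a
    # small Pythia checkpoint run on CPU so no GPU is needed.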
    "task_name,limit,model,model_args",
    [
        (
            ["arc_easy"],
            10,
            "hf",
            "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
        )
    ],
)
def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
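    """End-to-end check of both evaluator entry points.

    Runs evaluator.simple_evaluate(), then rebuilds the same LM through the
    model registry and runs the lower-level evaluator.evaluate() on the same
    tasks, and asserts that the two runs report identical arc_easy metrics.
    """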

    e1 = evaluator.simple_evaluate(
        model=model,
        tasks=task_name,
        limit=limit,
        model_args=model_args,
    )
    assert e1 is not None

    lm = api.registry.get_model(model).create_from_arg_string(
        model_args,
        {
            "batch_size": None,
            "max_batch_size": None,
            "device": None,
        },
    )
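
    # Resolve the requested task names into task objects for the direct
    # evaluator.evaluate() call below.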
    task_manager = tasks.TaskManager()
    task_dict = tasks.get_task_dict(task_name, task_manager)

    e2 = evaluator.evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
    )

    assert e2 is not None
    # check that caching is working
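    # (the metric values reported for arc_easy should be identical between the
    # simple_evaluate run and the direct evaluate run above)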

    def r(x):
        return x["results"]["arc_easy"]

    assert all(
        x == y
        for x, y in zip(r(e1).values(), r(e2).values())
    )
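

# A typical way to run only this test is a standard pytest invocation, e.g.
# (path assumed relative to the directory containing this file):
#   pytest test_evaluator.py -k test_evaluator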