test_evaluator.py 1.69 KB
Newer Older
1
import os
haileyschoelkopf's avatar
haileyschoelkopf committed
2
3
4

# import lm_eval.base as base
import lm_eval.api.registry as registry
Leo Gao's avatar
Leo Gao committed
5
import lm_eval.tasks as tasks
haileyschoelkopf's avatar
haileyschoelkopf committed
6
7
8

# import lm_eval.models as models

Leo Gao's avatar
Leo Gao committed
9
import lm_eval.evaluator as evaluator
10
import random
Leo Gao's avatar
Leo Gao committed
11
12
13
14
15
16
import pytest


# TODO: more fine grained unit tests rather than this big honking integration
# test once we break evaluator into smaller, more manageable pieces

baberabb's avatar
baberabb committed
17
18
19
20
21
# @pytest.mark.parametrize("taskname,task_class", tasks.TASK_REGISTRY.items())
def test_evaluator():
    """End-to-end smoke test of the evaluator on a dummy model.

    Runs ``simple_evaluate`` twice with identical arguments on one small
    task and asserts the two result dicts match, i.e. evaluation is
    deterministic / reproducible across runs.
    """
    task_names = ["arc_easy"]
    example_limit = 10
    # task_dict = tasks.get_task_dict(task)

    # TODO: re-add cachingLM
    # os.system("rm test_cache.db")
    # lm = base.CachingLM(models.get_model("dummy")(), "test_cache.db")
    lm = registry.get_model("dummy")()

    def fake_loglikelihood(requests):
        """Stub loglikelihood: check whitespace conventions on each
        (context, continuation) pair, then return fixed-seed scores."""
        for context, continuation in (request.args for request in requests):
            if not context:
                continue
            # space convention: leading whitespace belongs to the
            # continuation, not the context
            assert context[-1] != " "
            assert continuation[0] == " " or context[-1] == "\n"

        random.seed(42)
        return [(-random.random(), False) for _ in requests]

    def fake_loglikelihood_rolling(requests):
        """Stub rolling loglikelihood: type-check inputs, then return
        fixed-seed scores."""
        for (text,) in requests:
            assert isinstance(text, str)

        random.seed(42)
        return [-random.random() for _ in requests]

    # NOTE(review): these patched methods are only exercised if
    # simple_evaluate reuses this `lm` instance; passing model="dummy"
    # below looks like it constructs a fresh model — confirm against
    # evaluator.simple_evaluate.
    lm.loglikelihood = fake_loglikelihood
    lm.loglikelihood_rolling = fake_loglikelihood_rolling

    first_run = evaluator.simple_evaluate(
        model="dummy",
        tasks=task_names,
        limit=example_limit,
        bootstrap_iters=10,
    )

    second_run = evaluator.simple_evaluate(
        model="dummy",
        tasks=task_names,
        limit=example_limit,
        bootstrap_iters=10,
    )

    # check that caching is working
    # NOTE(review): with CachingLM commented out above, this really
    # asserts determinism of two fresh runs rather than cache behavior.
    assert first_run == second_run