import os
import lm_eval.base as base
import lm_eval.tasks as tasks
import lm_eval.models as models
import lm_eval.evaluator as evaluator
import random
import pytest


# TODO: more fine grained unit tests rather than this big honking integration
# test once we break evaluator into smaller, more manageable pieces

@pytest.mark.parametrize("taskname,task_class", tasks.TASK_REGISTRY.items())
def test_evaluator(taskname, task_class):
    """Integration test: run every registered task end-to-end through the
    evaluator with a dummy LM and deterministic fake log-likelihoods, and
    verify that a second (cache-hit) run produces identical results.
    """
    task_dict = tasks.get_task_dict([taskname])

    # Remove any stale cache portably; os.system("rm ...") is Unix-only,
    # spawns a shell, and prints an error when the file does not exist.
    if os.path.exists("test_cache.db"):
        os.remove("test_cache.db")
    lm = base.CachingLM(models.get_model('dummy')(), "test_cache.db")

    def ll_fn(reqs):
        # Fake loglikelihood: validate request formatting, then return
        # deterministic pseudo-random (logprob, is_greedy) pairs.
        for ctx, cont in reqs:
            if not ctx:
                continue
            # space convention: context must not end with a space, and the
            # continuation must start with one (unless context ends a line)
            assert ctx[-1] != ' '
            assert cont[0] == ' ' or ctx[-1] == '\n'

        res = []

        # fixed seed so both evaluate() calls see identical values
        random.seed(42)
        for _ in reqs:
            res.append((-random.random(), False))

        return res

    def ll_perp_fn(reqs):
        # Fake rolling loglikelihood: each request is a 1-tuple of a string.
        for string, in reqs:
            assert isinstance(string, str)

        res = []
        random.seed(42)
        for _ in reqs:
            res.append(-random.random())

        return res

    lm.loglikelihood = ll_fn
    lm.loglikelihood_rolling = ll_perp_fn

    # small limit keeps this integration test fast across the whole registry
    limit = 10
    e1 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10, description_dict=None)
    e2 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10, description_dict=None)

    # check that caching is working: the second run must replay the cached
    # results and therefore match the first exactly
    assert e1 == e2