import lm_eval.tasks as tasks
import lm_eval.models as models
import lm_eval.evaluator as evaluator
import random
import pytest
import os
import json
import hashlib
import collections


# Ensure the golden-data directory exists before any test reads or writes fixtures.
os.makedirs("tests/testdata", exist_ok=True)


def assert_target(name, ob):
    """Compare ``ob`` against the stored golden JSON file for ``name``.

    On the first run (no golden file yet) the object is serialized and
    recorded instead of compared, so subsequent runs can assert stability.
    """
    target_path = f"tests/testdata/{name}.json"

    if not os.path.exists(target_path):
        # First run: record the golden file rather than asserting.
        with open(target_path, "w") as out:
            json.dump(ob, out, sort_keys=True)
        return

    # Round-trip the candidate through JSON so it matches the on-disk
    # representation, then compare flattened dicts with float tolerance.
    candidate = flatten(json.loads(json.dumps(ob, sort_keys=True)))
    with open(target_path) as src:
        recorded = flatten(json.load(src))

    # Use relative tolerance of 1e-5 and absolute tolerance of 1e-8,
    # assuming most metrics work on `float32` values, which is the common
    # default floating type across popular libraries (PyTorch, Tensorflow, and JAX).
    assert recorded == pytest.approx(candidate, rel=1e-5, abs=1e-8)

def assert_target_hashed(name, ob):
    """Compare ``ob`` against a stored golden SHA-256 digest for ``name``.

    Only the hex digest is stored (not the full serialized object); on the
    first run the digest is recorded instead of compared.
    """
    target_path = f"tests/testdata/{name}"
    # Canonical serialization (sorted keys) so the digest is order-independent.
    digest = hashlib.sha256(
        json.dumps(ob, sort_keys=True).encode("utf-8")
    ).hexdigest()

    if os.path.exists(target_path):
        with open(target_path) as src:
            assert src.read() == digest
    else:
        # First run: record the golden digest rather than asserting.
        with open(target_path, "w") as out:
            out.write(digest)


# from https://stackoverflow.com/a/6027615
def flatten(d, parent_key="", sep="."):
    """Flatten a nested mapping into a single-level dict with joined keys.

    Nested keys are concatenated with ``sep`` (e.g. ``{"a": {"b": 1}}`` ->
    ``{"a.b": 1}``). Adapted from https://stackoverflow.com/a/6027615.
    """
    flat = {}
    for key, value in d.items():
        compound_key = parent_key + sep + key if parent_key else key
        if isinstance(value, collections.abc.MutableMapping):
            # Recurse into sub-mappings, accumulating their flattened entries.
            flat.update(flatten(value, compound_key, sep=sep))
        else:
            flat[compound_key] = value
    return flat


# make sure eval results for a task version are stable
@pytest.mark.parametrize("taskname,task_class", tasks.TASK_REGISTRY.items())
def test_versions_stable(taskname, task_class):
    """Pin each registered task's evaluation pipeline to stored golden data.

    A dummy model is driven through ``evaluator.evaluate``; the requests the
    task produces are hashed against golden files, and the resulting metrics
    (computed from seeded pseudo-random scores) are compared against a stored
    JSON target. A change to a task's requests or metric aggregation without
    bumping ``VERSION`` shows up as a mismatch here.
    """
    task_dict = tasks.get_task_dict([taskname])
    lm = models.get_model("dummy")()

    def ll_fn(reqs):
        # Stand-in for LM.loglikelihood: validate the whitespace convention
        # on (context, continuation) pairs, hash the requests, and return
        # deterministic fake (logprob, is_greedy) results.
        for ctx, cont in reqs:
            if len(ctx) == 0:
                continue
            # space convention
            assert ctx[-1] != " "
            assert cont[0] == " " or ctx[-1] == "\n"

        assert_target_hashed(f"{taskname}-v{task_class.VERSION}-loglikelihood", reqs)

        res = []

        # Re-seed per call so fake scores are reproducible run-to-run.
        random.seed(42)
        for _ in reqs:
            res.append((-random.random(), False))

        return res

    def ll_perp_fn(reqs):
        # Stand-in for LM.loglikelihood_rolling: each request is a 1-tuple
        # holding the full string to score.
        for (string,) in reqs:
            assert isinstance(string, str)

        assert_target_hashed(
            f"{taskname}-v{task_class.VERSION}-loglikelihood_rolling", reqs
        )

        res = []

        random.seed(42)
        for _ in reqs:
            res.append(-random.random())

        return res

    def greedy_until(reqs):
        # Stand-in for LM.greedy_until: return a constant generation for
        # every (context, until) request after hashing the request set.
        res = []
        assert_target_hashed(f"{taskname}-v{task_class.VERSION}-greedy_until", reqs)

        for ctx, _ in reqs:
            res.append("lol")
            assert ctx.strip() != ""

        return res

    # Monkey-patch the dummy model's request handlers with the checked fakes.
    lm.loglikelihood = ll_fn
    lm.loglikelihood_rolling = ll_perp_fn
    lm.greedy_until = greedy_until

    limit = None
    # bootstrap_iters kept tiny: this test checks stability, not stderr quality.
    result = evaluator.evaluate(
        lm=lm,
        task_dict=task_dict,
        num_fewshot=0,
        limit=limit,
        bootstrap_iters=10,
        description_dict=None,
    )

    # Final metrics are compared (with float tolerance) to the golden JSON.
    assert_target(f"{taskname}-v{task_class.VERSION}-res", result)