import os
from itertools import islice

import datasets
import pytest

import lm_eval.tasks as tasks
from lm_eval.api.task import ConfigurableTask
from lm_eval.evaluator_utils import get_task_list

from .utils import new_tasks
# Allow loading datasets whose builder scripts require remote code execution.
datasets.config.HF_DATASETS_TRUST_REMOTE_CODE = True
# Silence the tokenizers fork/parallelism warning in test workers.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Default Task: used when CI detects no new/modified tasks.
TASKS = ["arc_easy"]
def get_new_tasks_else_default():
    """
    Check if any modifications have been made to built-in tasks and return
    the list, otherwise return the default task list.

    :return: list of task names to run the generic task tests against
    """
    # CI: new_tasks() checks if any modifications have been made.
    # NOTE: TASKS is only read here, so no `global` declaration is needed.
    task_classes = new_tasks()
    # Fall back to the default task list when nothing was modified.
    return task_classes if task_classes else TASKS


def task_class(task_names=None, task_manager=None) -> list[ConfigurableTask]:
    """
    Convert a list of task names to a list of ConfigurableTask instances.

    :param task_names: task names passed through to ``tasks.get_task_dict``
    :param task_manager: optional ``tasks.TaskManager``; a default instance is
        created when omitted
    :return: the instantiated task objects (fixed annotation: this returns a
        list of tasks, not a single ``ConfigurableTask``)
    """
    if task_manager is None:
        task_manager = tasks.TaskManager()
    task_dict = tasks.get_task_dict(task_names, task_manager)
    # get_task_dict returns a (possibly nested) mapping; flatten it to tasks.
    return [entry.task for entry in get_task_list(task_dict)]
@pytest.fixture()
def limit() -> int:
    """Number of documents sampled from each task per test."""
    return 10


# Tests
class BaseTasks:
    """
    Base class for testing tasks.

    Subclasses parameterize the ``task_class`` argument (one task instance
    per test) via ``@pytest.mark.parametrize``; see the concrete test
    classes below.
    """

    @staticmethod
    def _sample_docs(task, limit):
        """Take up to ``limit`` docs from the test split, else validation."""
        docs = task.test_docs() if task.has_test_docs() else task.validation_docs()
        return list(islice(docs, limit))

    def test_download(self, task_class: ConfigurableTask):
        task_class.download()
        assert task_class.dataset is not None

    def test_has_training_docs(self, task_class: ConfigurableTask):
        assert task_class.has_training_docs() in [True, False]

    def test_check_training_docs(self, task_class: ConfigurableTask):
        if task_class.has_training_docs():
            assert task_class._config["training_split"] is not None

    def test_has_validation_docs(self, task_class):
        assert task_class.has_validation_docs() in [True, False]

    def test_check_validation_docs(self, task_class):
        if task_class.has_validation_docs():
            assert task_class._config["validation_split"] is not None

    def test_has_test_docs(self, task_class):
        assert task_class.has_test_docs() in [True, False]

    def test_check_test_docs(self, task_class):
        task = task_class
        if task.has_test_docs():
            assert task._config["test_split"] is not None

    def test_should_decontaminate(self, task_class):
        task = task_class
        assert task.should_decontaminate() in [True, False]
        if task.should_decontaminate():
            assert task._config["doc_to_decontamination_query"] is not None

    def test_doc_to_text(self, task_class, limit):
        task = task_class
        arr = self._sample_docs(task, limit)
        _array = [task.doc_to_text(doc) for doc in arr]
        # Space convention: when the target delimiter is whitespace, the
        # prompt must not also end in whitespace.  Zero-length text is allowed
        # for perplexity-like tasks since the model tacks an <|endoftext|> on.
        target_delimiter: str = task.config.target_delimiter
        if not task.multiple_input:
            for x in _array:
                assert isinstance(x, str)
                if target_delimiter.isspace() and len(x) > 0:
                    assert not x[-1].isspace(), (
                        "doc_to_text ends in a whitespace and target delimiter also a whitespace"
                    )

    def test_create_choices(self, task_class, limit):
        task = task_class
        arr = self._sample_docs(task, limit)
        if "multiple_choice" in task._config.output_type:
            _array = [task.doc_to_choice(doc) for doc in arr]
            assert all(isinstance(x, list) for x in _array)
            assert all(isinstance(x[0], str) for x in _array)

    def test_doc_to_target(self, task_class, limit):
        task = task_class
        arr = self._sample_docs(task, limit)
        _array_target = [task.doc_to_target(doc) for doc in arr]
        if task._config.output_type == "multiple_choice":
            # TODO<baber>: label can be string or int; add better test conditions
            assert all(isinstance(label, (int, str)) for label in _array_target)

    def test_build_all_requests(self, task_class, limit):
        task_class.build_all_requests(rank=1, limit=limit, world_size=1)
        assert task_class.instances is not None

    # ToDO: Add proper testing
    def test_construct_requests(self, task_class, limit):
        task = task_class
        arr = self._sample_docs(task, limit)
        # ctx is "" for multiple input tasks
        requests = [
            task.construct_requests(
                doc=doc, ctx="" if task.multiple_input else task.doc_to_text(doc)
            )
            for doc in arr
        ]
        if limit:
            assert len(requests) == limit


@pytest.mark.parametrize(
    "task_class",
    task_class(get_new_tasks_else_default()),
    ids=lambda t: f"{t.config.task}",
)
class TestNewTasksElseDefault(BaseTasks):
    """
    Runs the generic task checks against every new/modified task, or
    against the default task list when nothing has been modified.
    """


@pytest.mark.parametrize(
    "task_class",
    task_class(
        ["arc_easy_unitxt"], tasks.TaskManager(include_path="./tests/testconfigs")
    ),
    ids=lambda t: f"{t.config.task}",
)
class TestUnitxtTasks(BaseTasks):
    """
    Runs the generic checks against a small custom Unitxt task, as
    described here:
      https://www.unitxt.ai/en/latest/docs/lm_eval.html

    The split checks are overridden to inspect the dataset object directly
    rather than the config fields.
    """

    def test_check_training_docs(self, task_class: ConfigurableTask):
        if not task_class.has_training_docs():
            return
        assert task_class.dataset["train"] is not None

    def test_check_validation_docs(self, task_class):
        if not task_class.has_validation_docs():
            return
        assert task_class.dataset["validation"] is not None

    def test_check_test_docs(self, task_class):
        if not task_class.has_test_docs():
            return
        assert task_class.dataset["test"] is not None

    def test_doc_to_text(self, task_class, limit: int):
        task = task_class
        source = task.test_docs() if task.has_test_docs() else task.validation_docs()
        texts = [task.doc_to_text(doc) for doc in islice(source, limit)]
        if not task.multiple_input:
            for text in texts:
                assert isinstance(text, str)