from __future__ import annotations

import logging
from collections.abc import Iterable
from dataclasses import asdict, dataclass, field
from typing import TYPE_CHECKING, Any, Callable, Union

import datasets

from lm_eval.api.filter import FilterEnsemble
from lm_eval.api.instance import OutputType
from lm_eval.config.metric import MetricConfig
from lm_eval.config.utils import maybe_serialize


if TYPE_CHECKING:
    from lm_eval.api.samplers import ContextSampler
    from lm_eval.api.task import Task
    from lm_eval.config.template import TemplateConfig

eval_logger = logging.getLogger(__name__)
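
# A "DataSet" here is either a materialized `datasets.Dataset` or any iterable
# of per-example dicts (e.g. a list of dicts returned by a `process_docs` hook).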

DataSet = Union[datasets.Dataset, Iterable[dict[str, Any]]]


@dataclass
class RepeatConfig:
    """Encapsulates information about a single repeat."""

    repeats: int = 1
    metric_fn: str | Callable = "pass@N"
    kwargs: dict | None = field(default_factory=dict)
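    # Illustrative usage only (argument values below are assumptions, not
    # defaults enforced elsewhere):
    #   RepeatConfig(repeats=4, metric_fn="pass@N", kwargs={"k": 2})
    #   i.e. sample each request 4 times and score the repeats with a
    #   pass@N-style metric.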


@dataclass
class FilterConfig:
    """Encapsulates information about a single filter."""

    name: str
    ensemble: FilterEnsemble
    metric_list: list[MetricConfig]
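    # A FilterConfig pairs a named FilterEnsemble (applied to raw model
    # responses) with the metrics computed on that ensemble's filtered output.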


@dataclass
class FewshotConfig:
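    """Configuration of the few-shot context: where example docs come from
    (a dataset split or inline samples), how they are sampled, and how many
    are used."""
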
    # Hack: this callable returns task.config.num_fewshot so that the value
    # stays in sync when it is modified at runtime.
    num_fewshot: Callable[[], int]
    split: str | None = None
    sampler: str | Callable = "default"
    samples: Callable[[], DataSet] | DataSet | None = None
    process_docs: Callable[[DataSet], DataSet] | None = None
    fewshot_indices: list[int] | None = None
    rnd: int = field(init=False, default=False)

    def __post_init__(self) -> None:
        if self.samples is not None and not (
            isinstance(self.samples, list) or callable(self.samples)
        ):
            raise TypeError(
                "samples must be either list[dict] or callable returning list[dict]"
            )

        if self.split is not None and self.samples is not None:
            eval_logger.warning(
                "Both split and samples are configured; split will take precedence"
            )

    @property
    def has_source(self) -> bool:
        """Check if any fewshot source is configured."""
        return self.split is not None or self.samples is not None

    def _get_raw_docs(
        self, dataset
    ) -> list[dict] | Callable[[], Iterable[dict[str, Any]]] | None:
        """Get raw documents from configured source."""
        if self.split is not None:
            return dataset[self.split]

        if self.samples is not None:
            if isinstance(self.samples, list) or callable(self.samples):
                return self.samples
            else:
                raise TypeError(
                    "samples must be either a list of dicts or a callable returning a list"
                )

    def get_docs(self, dataset) -> DataSet | None:
        """Get processed documents from configured source."""
        raw_docs = self._get_raw_docs(dataset)
        if raw_docs is None:
            return None

        if self.process_docs is not None:
            return self.process_docs(raw_docs)
        return raw_docs

    @property
    def get_sampler(self) -> Callable[..., Any] | None:
        from lm_eval.api import samplers
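        # NOTE: imported lazily here, presumably to avoid a circular import at
        # module load time. String names are resolved via `samplers.get_sampler`;
        # callables are returned unchanged.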

        if isinstance(self.sampler, str):
            return samplers.get_sampler(self.sampler)
        elif callable(self.sampler):
            return self.sampler


    def init_sampler(
        self, docs: list[dict], task: Task, rnd=None, fewshot_indices=None
    ) -> ContextSampler:
        """Initialize the sampler with the given documents and task."""
        if rnd is None:
            raise ValueError(
                "A `random.Random` generator argument must be provided to `rnd` of FewShotSampler!"
            )
        return self.get_sampler(
            docs,
            task,
            rnd=rnd,
            fewshot_indices=fewshot_indices
            if fewshot_indices
            else self.fewshot_indices,
        )


@dataclass
class TaskConfig:
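    """Declarative configuration for a single evaluation task, typically
    built from a task YAML via `TaskConfig.from_yaml` or from a template via
    `TaskConfig.from_template`."""
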
    # task naming/registry
    task: str | None = None
    task_alias: str | None = None
    tag: str | list | None = None
    # HF dataset options:
    # which dataset to use, and which splits serve which purpose
    custom_dataset: Callable[..., DataSet] | None = None
    dataset_path: str | None = None
    dataset_name: str | None = None
    dataset_kwargs: dict | None = field(default_factory=dict)
    training_split: str | None = None
    validation_split: str | None = None
    test_split: str | None = None
    fewshot_split: str | None = None
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable[[DataSet], DataSet] | None = None
    doc_to_text: Callable[[dict[str, Any]], Any] | str | None = None
    doc_to_target: Callable[[dict[str, Any]], Any] | str | None = None
    doc_to_image: Callable[[dict[str, Any]], Any] | str | None = None
    doc_to_audio: Callable[[dict[str, Any]], Any] | str | None = None
    unsafe_code: bool = False
    doc_to_choice: Callable[[dict[str, Any]], Any] | str | dict | list | None = None
    process_results: (
        Callable[[dict[str, Any], list[Any]], dict[str, Any]] | str | None
    ) = None
    use_prompt: str | None = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: dict[str, Any] | None = None
    # runtime configuration options
    num_fewshot: int | None = 0
    generation_kwargs: dict[str, Any] | None = None
    # scoring options
    metric_list: list | None = None
    output_type: OutputType = "generate_until"
    repeats: int = 1
    filter_list: list[dict] | None = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str | None = None
    gen_prefix: str | None = None
    multiple_input: bool = False
    metadata: dict | None = field(
        default_factory=dict
    )  # not used by the harness itself; lets users attach arbitrary info to a task

    _metric_list: list[MetricConfig] = field(default_factory=list)
    _filter_list: list[FilterConfig] = field(default_factory=list)
    # ds_cfg: DatasetConfig = field(init=False)
    fewshot_cfg: FewshotConfig = field(init=False)

    def __post_init__(self) -> None:
        ### ---setup generation kwargs--- ###
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                eval_logger.warning(
                    f"{self.task}: No `until` specified in `generation_kwargs`! Defaulting to the fewshot_delimiter={repr(self.fewshot_delimiter)}"
                )
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # in the absence of explicit arguments, ensure that we generate greedily
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                    "temperature": 0,
                }
                eval_logger.warning(
                    f"{self.task}: No `generation_kwargs` specified in task config, defaulting to {self.generation_kwargs}"
                )
        # ---setup fewshot config--- #
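        # `num_fewshot` is passed as a zero-arg callable so that FewshotConfig
        # always reads the current value, even if `self.num_fewshot` is
        # overridden later at runtime (see the note on FewshotConfig.num_fewshot).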
        _fewshot_cfg = self.fewshot_config if self.fewshot_config is not None else {}
        self.fewshot_cfg = FewshotConfig(
            num_fewshot=lambda: self.num_fewshot or _fewshot_cfg.get("num_fewshot", 0),
            split=self.fewshot_split,
            sampler=_fewshot_cfg.get("sampler", "default"),
            samples=_fewshot_cfg.get("samples", None),
            process_docs=_fewshot_cfg.get("process_docs", None),
            fewshot_indices=_fewshot_cfg.get("fewshot_indices", None),
        )

    def _get_metric(self, metric_list: list[dict] | None = None) -> list[MetricConfig]:
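        """Build `MetricConfig`s from a metric list (a filter-level list if given,
        otherwise the task-level `self.metric_list`), falling back to the default
        metrics registered for this output type when none are configured."""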
        from lm_eval.api.registry import (
            AGGREGATION_REGISTRY,
            DEFAULT_METRIC_REGISTRY,
            get_aggregation,
            get_metric,
            get_metric_aggregation,
            is_higher_better,
        )

        # if a metric_list is defined inside a filter, use that; otherwise use the task's metric_list
        metric_list = metric_list or self.metric_list
        metrics = []
        if not metric_list:
            # ---------- 1. If no metrics defined, use defaults for output type ----------
            _metric_list = DEFAULT_METRIC_REGISTRY[self.output_type]
            eval_logger.info(
                f"No metrics defined in config, using default metrics for {self.output_type}={_metric_list}"
            )
            metrics.extend(
                MetricConfig(
                    name=metric_name,
                    fn=get_metric(metric_name),
                    aggregation_fn=get_metric_aggregation(metric_name),
                    higher_is_better=is_higher_better(metric_name)
                    if is_higher_better(metric_name) is not None
                    else True,
                )
                for metric_name in _metric_list
            )
        else:
            # ---------- 2. Process user-defined metrics from config ----------
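            # Each entry is a plain dict from the task config, e.g. (illustrative):
            #   {"metric": "exact_match", "aggregation": "mean",
            #    "higher_is_better": True, "ignore_case": True}
            # Keys other than metric/aggregation/higher_is_better/hf_evaluate are
            # forwarded to the metric function as kwargs.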
            for metric_config in metric_list:
                metric_name = metric_config["metric"]
                _metric_fn_kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                _hf_evaluate_metric: bool = metric_config.get("hf_evaluate", False)
                _metric_fn = None
                _aggregation = None

                if self.process_results is not None:
                    # User will compute metrics inside `process_results()`
                    _metric_name = None
                    _metric_fn_kwargs = {}
                elif callable(metric_name):
                    # User passed a function object
                    _metric_name = metric_name.__name__
                    _metric_fn = metric_name.__call__
                else:
                    # Normal: look up by name
                    _metric_name = metric_name
                    _metric_fn = get_metric(metric_name, _hf_evaluate_metric)

                # ---------- 3. Decide how to aggregate examples ----------
                if "aggregation" in metric_config:
                    if isinstance(_agg_name := metric_config["aggregation"], str):
                        _aggregation = get_aggregation(_agg_name)
                    elif callable(_agg_name):  # noqa: E721
                        _aggregation = metric_config["aggregation"]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    _aggregation = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[_aggregation]}"
                    )

                # ---------- 4. Determine “higher-is-better” semantics ----------
                if "higher_is_better" in metric_config:
                    _higher_is_better = metric_config["higher_is_better"]
                else:
                    eval_logger.warning(
                        f"[Task: {self.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    _higher_is_better = is_higher_better(metric_name)

                metrics.append(
                    MetricConfig(
                        name=_metric_name,
                        fn=_metric_fn,
                        kwargs=_metric_fn_kwargs,
                        aggregation_fn=_aggregation,
                        higher_is_better=_higher_is_better,
                        hf_evaluate=_hf_evaluate_metric,
                    )
                )
        for m in metrics:
            if m not in self._metric_list:
                self._metric_list.append(m)
        return metrics

    @property
    def get_filters(self) -> list[FilterConfig]:
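        """Build `FilterConfig`s from `self.filter_list`.

        Each entry is expected to look roughly like (illustrative values):
            {"name": "strict-match",
             "filter": [{"function": "regex", "regex_pattern": "..."}],
             "metric_list": [...]}  # optional per-filter metrics
        When no filters are configured, a single 'take_first' ensemble is used
        so that repeated generations still collapse to one response per doc.
        """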
        from lm_eval.filters import build_filter_ensemble

        if not self.filter_list:
            eval_logger.debug(
                "No custom filters defined; falling back to 'take_first' for handling repeats."
            )
            return [
                FilterConfig(
                    name="none",
                    ensemble=build_filter_ensemble("none", [("take_first", None)]),
                    metric_list=self._get_metric(metric_list=None),
                )
            ]
        else:

            def _strip_fn(d: dict) -> tuple[str, dict]:
                return d["function"], {
                    k: v for k, v in d.items() if k not in ["function", "metric_list"]
                }

            configs = (
                self.filter_list.values()
                if isinstance(self.filter_list, dict)
                else self.filter_list
            )
            x = [
                FilterConfig(
                    name=cfg["name"],
                    ensemble=build_filter_ensemble(
                        filter_name=cfg["name"],
                        components=[_strip_fn(f) for f in cfg["filter"]],
                    ),
                    metric_list=self._get_metric(metric_list=cfg.get("metric_list")),
                )
                for cfg in configs
            ]
            return x

    @classmethod
    def from_yaml(cls, data: dict) -> TaskConfig:
        """Create a TaskConfig instance from a YAML-like dictionary."""
        return cls(**data)
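
    # Minimal usage sketch (the YAML-loading step is assumed and not part of
    # this module):
    #   with open("my_task.yaml") as f:
    #       cfg = TaskConfig.from_yaml(yaml.safe_load(f))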

    @classmethod
    def from_template(cls, template: TemplateConfig, **kwargs) -> TaskConfig:
        """Create a TaskConfig instance from a template.

        Args:
            template: TemplateConfig instance (MCQTemplateConfig or ClozeTemplateConfig)
            **kwargs: Additional arguments to override template defaults

        Returns:
            TaskConfig instance configured from the template
        """
        from lm_eval.config.template import (
            ClozeTemplateConfig,
            MCQTemplateConfig,
        )

        # Extract base configuration from template
        config_dict = {
            "task": template.task,
            "doc_to_text": template.doc_to_text,
            "doc_to_choice": template.doc_to_choice,
            "doc_to_target": template.doc_to_target,
            "description": template.description,
            "target_delimiter": template.target_delimiter,
            "fewshot_delimiter": template.fewshot_delimiter,
            "metric_list": template.metric_list,
        }

        # Add common template attributes if they exist
        if hasattr(template, "answer_suffix"):
            config_dict["target_delimiter"] = (
                template.answer_suffix + template.target_delimiter
            )

        # Handle template-specific configurations
        if isinstance(template, MCQTemplateConfig):
            # For MCQ templates, set up multiple choice specific config
            config_dict["output_type"] = "multiple_choice"

            # MCQ templates typically use accuracy metrics
            if template.metric_list is None:
                config_dict["metric_list"] = [{"metric": "acc"}]

        elif isinstance(template, ClozeTemplateConfig):
            # For Cloze templates, set up generation config
            config_dict["output_type"] = "generate_until"

            # Cloze templates typically use accuracy and normalized accuracy
            if template.metric_list is None:
                config_dict["metric_list"] = [{"metric": "acc"}, {"metric": "acc_norm"}]
        else:
            # Generic template - try to infer output type
            if hasattr(template, "template"):
                if template.template == "mcq":
                    config_dict["output_type"] = "multiple_choice"
                elif template.template == "cloze":
                    config_dict["output_type"] = "generate_until"

        # Override with any user-provided kwargs
        config_dict.update(kwargs)

        # Create and return TaskConfig instance
        return cls(**config_dict)
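
    # Illustrative call (the template fields shown are assumptions; see
    # lm_eval.config.template for the actual definitions):
    #   mcq = MCQTemplateConfig(task="my_mcq", doc_to_text="{{question}}",
    #                           doc_to_choice=["A", "B", "C", "D"],
    #                           doc_to_target="{{answer}}")
    #   cfg = TaskConfig.from_template(mcq, num_fewshot=5)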

    def to_dict(self, keep_callable: bool = False) -> dict:
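        """Serialize this config to a plain dict, dropping fields whose value is
        None. Nested dicts/lists/tuples/sets are walked recursively; callables
        are kept as-is when `keep_callable=True`, otherwise they are passed
        through `maybe_serialize` for a serializable representation."""
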
        def _ser(x):
            if isinstance(x, dict):
                return {k: _ser(v) for k, v in x.items()}
            if isinstance(x, (list, tuple, set)):
                return type(x)(_ser(i) for i in x)
            return maybe_serialize(x, keep_callable)

        return {k: _ser(v) for k, v in asdict(self).items() if v is not None}