import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass, field
from functools import cached_property
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm
from typing_extensions import deprecated

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger(__name__)


@dataclass
class MetricConfig:
    """Encapsulates information about a single metric."""

    name: str
    fn: Optional[Callable] = None
    kwargs: Optional[dict] = None
    aggregation_fn: Optional[Callable] = None
    higher_is_better: bool = True
    hf_evaluate: bool = False
    is_elementwise: bool = True

    @cached_property
    def metric_name(self) -> str:
        return self.name

    @cached_property
    def aggregation(self) -> Callable:
        if self.aggregation_fn is None:
            return get_aggregation(self.name)
        return self.aggregation_fn

    @cached_property
    def _higher_is_better(self) -> bool:
        if self.higher_is_better is None:
            return is_higher_better(self.name)
        return self.higher_is_better

    def calculate_metric(self, *args, **kwargs) -> Any:
        """Calculates the metric using the provided function and arguments."""
        if self.fn is None:
            raise ValueError(f"Metric function for {self.name} is not defined.")
        # guard against `kwargs=None` (the field's default)
        return self.fn(*args, **{**(self.kwargs or {}), **kwargs})

    def compute_aggregation(self, values: List[Any]) -> Any:
        """Computes the aggregation of the metric values."""
        if self.aggregation_fn is None:
            raise ValueError(f"Aggregation function for {self.name} is not defined.")
        return self.aggregation_fn(values)
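

# Illustrative sketch (editor's example, not part of the harness API): wiring a
# MetricConfig up by hand. `get_metric` and `mean` are imported above; the exact
# positional/keyword arguments accepted by the wrapped metric function depend on
# the metric and are hypothetical here.
#
#   acc = MetricConfig(name="acc", fn=get_metric("acc"), aggregation_fn=mean)
#   per_doc_scores = [acc.calculate_metric(refs, preds) for refs, preds in pairs]
#   aggregate_score = acc.compute_aggregation(per_doc_scores)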

@dataclass
class RepeatConfig:
    """Encapsulates information about a single repeat."""

    repeats: int = 1
    metric_fn: Optional[Callable] = None
    kwargs: Optional[dict] = None


@dataclass
class FilterConfig:
    """Encapsulates information about a single filter."""

    name: str
    fn: Optional[Callable] = None
    kwargs: Optional[dict] = None


@dataclass
class FewshotConfig:
    """Encapsulates information about fewshot sampling."""

    sampler: str
    samples: list[dict]
    process_docs: Optional[Callable] = None
    fewshot_indices: Optional[list[int]] = None


@dataclass
class TemplateConfig:
    """Encapsulates information about a template."""

    template: str
    doc_to_text: Union[str, Callable[[dict], str]]
    doc_to_choice: Union[str, list, Callable[[dict], list]]
    doc_to_target: Union[int, Callable[[dict], int]]
    description: str
    context_prefix: str
    prefix_delimiter: str
    context_delimiter: str
    answer_suffix: str
    target_delimiter: str
    choice_format: Optional[str]
    choice_delimiter: Optional[str]
    fewshot_delimiter: str
    metric_list: Optional[Union[list[str], list[MetricConfig]]] = field(
        default_factory=lambda: ["acc", "acc_norm"]
    )


@dataclass
class MCQTemplateConfig:
    """Encapsulates information about a template."""

    doc_to_text: Union[str, Callable[[dict], str]]
    doc_to_choice: Union[str, list, Callable[[dict], list]]
    doc_to_target: Union[int, Callable[[dict], int]]
    template = "mcq"
    context_prefix: str = "Question:"
    prefix_delimiter: str = " "
    context_delimiter: str = "\n"
    answer_suffix: str = "Answer:"
    target_delimiter: str = "\n"
    choice_format: Optional[str] = "letters"
    choice_delimiter: Optional[str] = "\n"
    fewshot_delimiter: str = "\n\n"
    metric_list: Optional[Union[list[str], list[MetricConfig]]] = field(
        default_factory=lambda: ["acc"]
    )


@dataclass
class ClozeTemplateConfig:
    """Encapsulates information about a template."""

    doc_to_text: Union[str, Callable[[dict], str]]
    doc_to_choice: Union[str, list, Callable[[dict], list]]
    doc_to_target: Union[int, Callable[[dict], int]]
    template: str = "cloze"
    description: str = ""
    context_prefix: str = "Question:"
    prefix_delimiter: str = " "
    context_delimiter: str = "\n"
    answer_suffix: str = "Answer:"
    target_delimiter: str = " "
    choice_format: Optional[str] = None
    choice_delimiter: Optional[str] = None
    fewshot_delimiter: str = "\n\n"
    metric_list: Optional[Union[list[str], list[MetricConfig]]] = field(
        default_factory=lambda: ["acc", "acc_norm"]
    )
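

# Illustrative sketch (editor's example): these template configs mirror the
# fields a YAML task template would set. A cloze-style task could be declared
# as below; all delimiters and prefixes fall back to the defaults above. The
# Jinja-style field values are hypothetical:
#
#   cloze_cfg = ClozeTemplateConfig(
#       doc_to_text="{{question}}",
#       doc_to_choice="{{choices}}",
#       doc_to_target="{{answer}}",
#   )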


@dataclass
class DatasetConfig:
    """Encapsulates information about a dataset."""

    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    custom_dataset: Optional[Callable] = None


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    custom_dataset: Optional[Callable] = None
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[str] = (
        None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert it is not the same split being evaluated on (?)
    )
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_image: Union[Callable, str, None] = None
    doc_to_audio: Union[Callable, str, None] = None
    unsafe_code: bool = False
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    gen_prefix: Optional[str] = None
    metadata: Optional[dict] = (
        None  # not used by the code by default; allows users to pass arbitrary info to tasks
    )

    _metric_list = None
    _filter_list = None

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                eval_logger.warning(
                    f"{self.task}: No `until` specified in `generation_kwargs`! Defaulting to the fewshot_delimiter={repr(self.fewshot_delimiter)}"
                )
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we generate greedily in the absence of explicit arguments
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                    "temperature": 0,
                }
                eval_logger.warning(
                    f"{self.task}: No `generation_kwargs` specified in task config, defaulting to {self.generation_kwargs}"
                )

        if self.metric_list is not None:
            for metric_config in self.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )

    def get_metrics(self) -> list["MetricConfig"]:
        metrics = []
        if self.metric_list is None:
            _metric_list = DEFAULT_METRIC_REGISTRY[self.output_type]
            eval_logger.info(
                f"No metrics defined in config, using default metrics for {self.output_type}={_metric_list}"
            )
            metrics.extend(
                MetricConfig(
                    name=metric_name,
                    fn=get_metric(metric_name),
                    aggregation_fn=get_metric_aggregation(metric_name),
                    higher_is_better=is_higher_better(metric_name),
                )
                for metric_name in _metric_list
            )
        else:
            for metric_config in self.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                _metric_fn_kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                _hf_evaluate_metric: bool = metric_config.get("hf_evaluate", False)
                _metric_fn = None
                _aggregation = None

                if self.process_results is not None:
                    # User will compute metrics inside `process_results()`
                    _metric_name = None
                    _metric_fn_kwargs = {}
                elif callable(metric_name):
                    # User passed a function object
                    _metric_name = metric_name.__name__
                    _metric_fn = metric_name.__call__
                else:
                    # Normal case: keep the metric name and look up its function
                    _metric_name = metric_name
                    _metric_fn = get_metric(metric_name, _hf_evaluate_metric)

                # ---------- Decide how to aggregate examples ----------
                if "aggregation" in metric_config:
                    if isinstance(_agg_name := metric_config["aggregation"], str):
                        _aggregation = get_aggregation(_agg_name)
                    elif callable(_agg_name):
                        _aggregation = metric_config["aggregation"]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    _aggregation = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[_aggregation]}"
                    )

                # ---------- Determine "higher-is-better" semantics ----------
                if "higher_is_better" in metric_config:
                    _higher_is_better = metric_config["higher_is_better"]
                else:
                    eval_logger.warning(
                        f"[Task: {self.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    _higher_is_better = is_higher_better(metric_name)

                metrics.append(
                    MetricConfig(
                        name=_metric_name,
                        fn=_metric_fn,
                        kwargs=_metric_fn_kwargs,
                        aggregation_fn=_aggregation,
                        higher_is_better=_higher_is_better,
                        hf_evaluate=_hf_evaluate_metric,
                    )
                )
        return metrics
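
    # Illustrative sketch (editor's example) of the mapping-style entries
    # `get_metrics` consumes when `metric_list` is set on a task config. Keys
    # other than "metric", "aggregation", "higher_is_better" and "hf_evaluate"
    # are forwarded as kwargs to the metric function:
    #
    #   metric_list:
    #     - metric: exact_match
    #       aggregation: mean
    #       higher_is_better: true
    #       ignore_case: true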

    def get_filters(self):
        if self.filter_list is not None:
            _filter_list = []
            for filter_config in self.filter_list:
                components = []
                for function in filter_config["filter"]:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                _filter_list.append(
                    build_filter_ensemble(
                        filter_name=filter_config["name"],
                        components=components,
                    )
                )
        else:
            # TODO: handle repeats in a more general way rather than just discarding
            eval_logger.debug(
                "No custom filters defined. Using default 'take_first' filter for handling repeats."
            )
            _filter_list = [build_filter_ensemble("none", [["take_first", None]])]

        return _filter_list
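
    # Illustrative sketch (editor's example) of a `filter_list` entry as it
    # appears in task configs; each "filter" step names a registered filter
    # function plus its kwargs, applied in order:
    #
    #   filter_list:
    #     - name: strict-match
    #       filter:
    #         - function: regex
    #           regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
    #         - function: take_first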

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside the full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict
    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
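
    # Illustrative sketch (editor's example): a minimal TaskConfig and its
    # printable dump; field values here are hypothetical.
    #
    #   cfg = TaskConfig(
    #       task="my_task",
    #       dataset_path="json",
    #       test_split="test",
    #       output_type="generate_until",
    #   )
    #   cfg.to_dict()  # None-valued fields are omitted from the result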


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., "choices": [...], "answer": ...}
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig(**config) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[random.Random] = (
            None  # purposely induce errors in case of improper usage
        )
    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self) -> bool:
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self) -> bool:
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self) -> bool:
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            if (self.config.num_fewshot or 0) > 0:
                eval_logger.warning(
                    f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                    ", using test_docs as fewshot_docs but this is not recommended."
                )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd) -> Iterable[dict]:
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc) -> str:
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc) -> Union[str, int]:
        pass

    # not an abstractmethod because not every language-only task has to implement this
    def doc_to_image(self, doc):
        raise NotImplementedError

    def doc_to_audio(self, doc):
        raise NotImplementedError

    def doc_to_prefix(self, doc) -> str:
        return ""

    def build_all_requests(
        self,
        *,
        limit: Union[int, None] = None,
        samples: Optional[List[int]] = None,
        rank: int = 0,
        world_size: int = 1,
        cache_requests: bool = False,
        rewrite_requests_cache: bool = False,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        tokenizer_name: str = "",
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
        cache_key += "-chat_template" if apply_chat_template else ""
        cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else ""
        cache_key += (
            f"-system_prompt_hash{utils.hash_string(system_instruction)}"
            if system_instruction is not None
            else ""
        )
        cache_key += f"-tokenizer{tokenizer_name}"

        cached_instances = load_from_cache(file_name=cache_key, cache=cache_requests)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(
                rank=rank, limit=limit, samples=samples, world_size=world_size
            )
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                num_fewshot=0
                if self.config.num_fewshot is None
                else self.config.num_fewshot,
                system_instruction=system_instruction,
                apply_chat_template=apply_chat_template,
                fewshot_as_multiturn=fewshot_as_multiturn,
                chat_template=chat_template,
                gen_prefix=self.doc_to_prefix(doc),
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
                apply_chat_template=apply_chat_template,
                chat_template=chat_template,
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        TODO: update this docstring
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @deprecated("not used anymore")
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @deprecated("not used anymore")
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc) -> int:
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
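
    # Editor's note: these counts feed the rolling-perplexity aggregations
    # imported above, which consume (loglikelihood, weight) pairs. A hedged
    # sketch with hypothetical values:
    #
    #   pairs = [(ll, Task.count_bytes(doc)) for ll, doc in zip(lls, docs)]
    #   bpb = bits_per_byte(pairs)          # bits per byte
    #   ppl = weighted_perplexity(pairs)    # byte-weighted perplexity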

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None, description=None, **kwargs):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        # if not isinstance(self, ConfigurableTask):
        #     self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
        #     self.aggregation = lambda: {
        #         metric_name: get_metric_aggregation(metric_name)
        #     }
        setattr(self._config, "metric_list", [MetricConfig(name=metric_name)])
        setattr(self._config, "process_results", lambda *args: {"bypass": 0})
    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, Iterable[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self,
        *,
        rank: int = 0,
        limit: Union[int, None] = None,
        world_size: int = 1,
        samples: Optional[List[int]] = None,
    ) -> Iterator[Tuple[int, Any]]:
        if samples:
            n = len(self.eval_docs)
            assert all(e < n for e in samples), (
                f"Elements of --samples should be in the interval [0,k-1] where k is the number of total examples. In this case, k={n}."
            )
            eval_logger.info(
                f"{self.config.task}: Evaluating on {len(samples)} examples"
            )
            doc_iterator = utils.create_iterator(
                enumerate(x for i, x in enumerate(self.eval_docs) if i in samples),
                rank=int(rank),
                limit=None,  # limit does not matter here since we are selecting samples directly
                world_size=int(world_size),
            )
        else:
            limit = int(limit) if limit else None
            doc_iterator = utils.create_iterator(
                enumerate(self.eval_docs),
                rank=int(rank),
                limit=limit,
                world_size=int(world_size),
            )
        return doc_iterator
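
    # Illustrative sketch (editor's example): consuming the iterator on a
    # single process; it yields (doc_id, doc) tuples over the evaluation split.
    #
    #   for doc_id, doc in task.doc_iterator(rank=0, world_size=1, limit=8):
    #       print(doc_id, task.doc_to_text(doc))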


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )
        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type
        if self.config.doc_to_image is not None:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.doc_to_audio:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.unsafe_code is not False:
            self.UNSAFE_CODE = True

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path
        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name
        self.metric_list: list[MetricConfig] = self.config.get_metrics()
        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        self._filters = self.config.get_filters()

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )
        self.task_docs = self.eval_docs
        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)
            if isinstance(test_text, int):
                eval_logger.debug(
                    "doc_to_text returned an int. Assuming multiple inputs."
                )
                self.multiple_input = num_choice
        else:
            test_choice = None
        if isinstance(test_target, list):
            eval_logger.debug(
                "doc_to_target returned a list. Assuming multiple targets."
            )
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)
        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(
        self, dataset_kwargs: Optional[Dict[str, Any]] = None, **kwargs
    ) -> None:
        if isinstance(self.config.custom_dataset, Callable):
            eval_logger.warning(
                f"{self.config.task}: Custom kwargs can be passed to `--metadata` in console (as json string) or to the TaskManager."
                + "\nFor example --metadata='{\"max_seq_lengths\":[4096, 8192]}'. For details see task Readme."
            )
            self.dataset = self.config.custom_dataset(
                **(self.config.metadata or {}), **(self.config.dataset_kwargs or {})
            )
        else:
            self.dataset = datasets.load_dataset(
                path=self.DATASET_PATH,
                name=self.DATASET_NAME,
                **dataset_kwargs if dataset_kwargs is not None else {},
            )
    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> Optional[datasets.Dataset]:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> Optional[datasets.Dataset]:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]
    def test_docs(self) -> Optional[datasets.Dataset]:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]
    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        elif (
            self.config.fewshot_config is not None
            and self.config.fewshot_config.get("samples", None) is not None
        ):
            if isinstance(self.config.fewshot_config["samples"], list):
                return self.config.fewshot_config["samples"]
            elif callable(self.config.fewshot_config["samples"]):
                return self.config.fewshot_config["samples"]()
            else:
                raise Exception(
                    "`fewshot_config['samples']` was incorrectly defined in the configuration. It should be either a list of samples as a dict, or function returning this list."
                )
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()
    @staticmethod
    def append_target_question(
        labeled_examples: List[Dict[str, str]],
        question: str,
        fewshot_as_multiturn: bool = False,
        gen_prefix: Optional[str] = None,
    ) -> None:
        """Adds a target question to the labeled examples list.
        If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry.
        Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant.
        """
        if not fewshot_as_multiturn:
            # if no messages or last message is system, append as new user entry
            if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system":
1238
                labeled_examples.append({"role": "user", "content": question})
KonradSzafer's avatar
KonradSzafer committed
1239
1240
            # if last message is user, append to it to avoid two user messages in a row
            else:
1241
                labeled_examples[-1]["content"] += question
KonradSzafer's avatar
KonradSzafer committed
1242
1243
        else:
            # if fewshot_as_multiturn is True, append as next user entry (last is always assistant)
1244
            labeled_examples.append({"role": "user", "content": question})
Baber Abbasi's avatar
Baber Abbasi committed
1245
1246
        if gen_prefix:
            labeled_examples.append({"role": "assistant", "content": gen_prefix})
KonradSzafer's avatar
KonradSzafer committed
1247
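    # A minimal sketch of how this mutates a transcript (contents hypothetical):
    #   msgs = [{"role": "user", "content": "Q1?"}, {"role": "assistant", "content": "A1"}]
    #   append_target_question(msgs, "Q2?", fewshot_as_multiturn=True)
    #   # -> msgs[-1] == {"role": "user", "content": "Q2?"}
    #   append_target_question(msgs, " Q3?", fewshot_as_multiturn=False)
    #   # -> merged into the last user turn: {"role": "user", "content": "Q2? Q3?"}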

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: dict,
        num_fewshot: int,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        gen_prefix: Optional[str] = None,
    ) -> Union[str, List[str], None]:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: dict
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param system_instruction: str
            System instruction to be applied to the prompt.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param fewshot_as_multiturn: bool
            Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
        :param chat_template:
            Callable (from lm.apply_chat_template) that takes in a list[Dict] chat transcript and renders it into a string.
        :param gen_prefix:
            String to append after the <|assistant|> token.
        :returns: str
            The fewshot context.
        """
        if apply_chat_template:
            labeled_examples = []
        else:
            labeled_examples = ""

        # get task description
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        # create system prompt based on the provided system instruction and description
        if system_instruction is not None and description:
            system_prompt = (
                f"{system_instruction}{self.sampler.fewshot_delimiter}{description}"
            )
        elif system_instruction is not None:
            system_prompt = system_instruction
        elif description:
            system_prompt = description
        else:
            system_prompt = ""

        # add system prompt if specified
        if system_prompt:
            if apply_chat_template:
                labeled_examples.append({"role": "system", "content": system_prompt})
            else:
                labeled_examples = system_prompt
        # if few-shot - append examples after the system prompt
        if num_fewshot > 0:
            if apply_chat_template:
                labeled_examples.extend(
                    self.sampler.get_chat_context(
                        doc,
                        num_fewshot,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                )
            else:
                labeled_examples += self.sampler.get_context(
                    doc, num_fewshot, gen_prefix=gen_prefix
                )
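        # At this point (non-chat path) labeled_examples is, schematically,
        # "<system_prompt><fewshot examples...>", with delimiters supplied by the
        # sampler and task config; the chat path holds the same content as messages.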

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if self.multiple_input:
                # TODO: append prefill?
                if not labeled_examples:
                    return ""
                return chat_template(labeled_examples)
            if isinstance(example, str):
                self.append_target_question(
                    labeled_examples,
                    example,
                    fewshot_as_multiturn,
                    gen_prefix=gen_prefix,
                )
            # for loglikelihood create a list of questions with appended choices
            elif isinstance(example, list):
                labeled_examples_list = []
                # copy chat history for each example and append the answer
                for ex in example:
                    chat = deepcopy(labeled_examples)
                    self.append_target_question(
                        chat,
                        ex,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                    # TODO: append prefill?
                    labeled_examples_list.append(
                        chat_template(
                            chat,
                            add_generation_prompt=False if gen_prefix else True,
                        )
                    )
                return labeled_examples_list
            # if example is an integer, append the choice or convert to string
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    self.append_target_question(
                        labeled_examples,
                        choices[example],
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                else:
                    self.append_target_question(
                        labeled_examples,
                        str(example),
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
            # return lm.apply_chat_template(labeled_examples)
            return chat_template(
                labeled_examples,
                add_generation_prompt=False if gen_prefix else True,
            )
        else:
            prefix = (
                self.config.target_delimiter + gen_prefix
                if gen_prefix is not None
                else ""
            )
            if self.multiple_input:
                return labeled_examples
            if isinstance(example, str):
                return labeled_examples + example + prefix
            elif isinstance(example, list):
                return [labeled_examples + ex + prefix for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example] + prefix
                else:
                    return labeled_examples + str(example) + prefix

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc: dict):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )
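    # Resolution order above, illustrated with hypothetical configs: a feature name
    # ("text") returns doc["text"] directly; a callable receives the whole doc; any
    # other string ("{{question}} {{answer}}") is rendered as a template and then
    # ast.literal_eval'd.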

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc, doc_to_text=None):
        if self.prompt is not None:
            doc_to_text = self.prompt
        elif doc_to_text is not None:
            doc_to_text = doc_to_text
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self.config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")
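    # E.g. (hypothetical doc/config): doc = {"question": "2+2?"} with
    # doc_to_text: "Q: {{question}}\nA:" renders to "Q: 2+2?\nA:"; a bare feature
    # name like "question" would instead return doc["question"] unrendered.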

    def doc_to_target(self, doc: Mapping, doc_to_target=None) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        elif doc_to_target is not None:
            doc_to_target = doc_to_target
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self.config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any, doc_to_choice=None) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif doc_to_choice is not None:
            doc_to_choice = doc_to_choice
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")
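    # E.g. (hypothetical configs): doc_to_choice: ["no", "yes"] is returned as-is;
    # {"0": "no", "1": "yes"} yields list(...values()) == ["no", "yes"]; a template
    # string like "{{options}}" must render to a Python-literal list.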

    def doc_to_image(self, doc: Any, doc_to_image=None) -> Union[int, str, list, None]:
        if doc_to_image is not None:
            doc_to_image = doc_to_image
        elif self.config.doc_to_image is not None:
            doc_to_image = self.config.doc_to_image
        else:
            return None

        if isinstance(doc_to_image, list):
            image_feature = [
                self.doc_to_image(doc, feature) for feature in doc_to_image
            ]
            return [feature for feature in image_feature if feature is not None]
        elif isinstance(doc_to_image, str):
            if doc_to_image in self.features:
                return doc[doc_to_image]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_image, doc))
        elif callable(doc_to_image):
            return doc_to_image(doc)
        else:
            return None
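    # Note: a list config such as ["image_1", "image_2"] (feature names hypothetical)
    # is resolved per-feature via the recursion above, with missing (None) features
    # filtered out of the returned list.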

    def doc_to_audio(self, doc: Any, doc_to_audio=None) -> Union[int, str, list, None]:
        if doc_to_audio is not None:
            doc_to_audio = doc_to_audio
        elif self.config.doc_to_audio is not None:
            doc_to_audio = self.config.doc_to_audio
        else:
            return None

        if isinstance(doc_to_audio, list):
            audio_feature = [
                self.doc_to_audio(doc, feature) for feature in doc_to_audio
            ]
            return [feature for feature in audio_feature if feature is not None]
        elif isinstance(doc_to_audio, str):
            if doc_to_audio in self.features:
                return doc[doc_to_audio]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_audio, doc))
        elif callable(doc_to_audio):
            return doc_to_audio(doc)
        else:
            return None

    def doc_to_prefix(self, doc) -> Optional[str]:
        if (gen_prefix := self.config.gen_prefix) is not None:
            if gen_prefix in self.features:
                return doc[gen_prefix]
            else:
                return utils.apply_template(gen_prefix, doc)
        return None
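    # E.g. a hypothetical config gen_prefix: "Answer:" prefills each assistant turn
    # with "Answer:"; if that string named an existing dataset feature, the
    # per-document value would be used instead, since features take precedence above.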

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        apply_chat_template = kwargs.pop("apply_chat_template", False)
        chat_template: Callable | None = kwargs.pop("chat_template", None)

        aux_arguments = None

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if apply_chat_template:
                target_delimiter = ""
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                # apply chat_template to choices if apply_chat_template
                cont = self.doc_to_target(doc)

                arguments = [
                    (
                        ctx
                        + (
                            chat_template([{"role": "user", "content": choice}])
                            if apply_chat_template
                            else choice
                        ),
                        f"{target_delimiter}{cont}",
                    )
                    for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in [m.metric_name for m in self.metric_list]:
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                # TODO: should these be strided? will have to modify the processing in process_results if so
                aux_arguments = [
                    ("", f"{target_delimiter}{choice}") for choice in choices
                ]

                arguments.extend(aux_arguments)
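                # Worked illustration with made-up numbers: for choices ["yes", "no"]
                # with conditional lls (-1.2, -2.3) and unconditional lls (-2.0, -2.1),
                # process_results scores acc_mutual_info via argmax of
                # (-1.2 - (-2.0), -2.3 - (-2.1)) = (0.8, -0.2), i.e. it picks "yes".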

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        multimodal_arg = {}
        if (
            self.config.doc_to_image
        ):  # TODO: ensure that non-multimodal tasks aren't getting visual args
            multimodal_arg = {
                **multimodal_arg,
                **{"visual": self.doc_to_image(doc)},
            }

        if (
            self.config.doc_to_audio
        ):  # TODO: ensure that non-multimodal tasks aren't getting audio args
            multimodal_arg = {
                **multimodal_arg,
                **{"audio": self.doc_to_audio(doc)},
            }

        if bool(multimodal_arg):
            if isinstance(arguments, list):
                arguments = [arg + (multimodal_arg,) for arg in arguments]
            else:
                arguments = arguments + (multimodal_arg,)
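        # e.g. a loglikelihood pair (ctx, continuation) becomes
        # (ctx, continuation, {"visual": ...}) so model backends can unpack the
        # extra mapping alongside the text arguments.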

        if self.OUTPUT_TYPE == "multiple_choice":
            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]

            return request_list

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=arguments,
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(m.metric_name for m in self.metric_list)
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
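            # Each value below is a (loglikelihood, count) pair; the corpus-level
            # aggregations then combine them, e.g. word perplexity is roughly
            # exp(-sum(lls) / sum(word_counts)) across all documents (a sketch,
            # not the exact aggregation code).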
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if 2 * len(choices) == len(lls) and "acc_mutual_info" in use_metric:
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                # as we extend the args list with unconditional ("", continuation) pairs
                lls_unconditional = lls[len(choices) :]
                if len(lls_unconditional) != len(choices):
                    raise ValueError
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[: len(choices)]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # when multiple_target is set, we expect gold to be a list.
            elif self.multiple_target:
                gold = list(gold)
            # TODO: handle this better
            elif type(gold) is not type(result) and not (
                "bypass" in use_metric or isinstance(result, list)
            ):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self.metric_list:
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric.name == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = metric.fn(
                            references=gold,
                            predictions=result,
                            **metric.kwargs,
                        )[metric.name]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = metric.fn(
                                    references=[gold_option],
                                    predictions=[result],
                                    **metric.kwargs,
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = metric.fn([gold_option, result])
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric.name]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
1870
                    try:
Baber's avatar
Baber committed
1871
                        result_score = metric.fn(
1872
1873
                            references=[gold],
                            predictions=[result],
Baber's avatar
Baber committed
1874
                            **metric.kwargs,
1875
                        )
1876
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
Baber's avatar
Baber committed
1877
                        result_score = metric.fn([gold, result])
1878
1879
1880
1881
1882
1883
1884
                if isinstance(result_score, dict):
                    # TODO: this handles the case where HF evaluate returns a dict.
                    # This allows for multiple metrics to be returned from the same function
                    for k, v in result_score.items():
                        result_dict[k] = v
                else:
                    result_dict[metric] = result_score
1885
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self) -> dict:
        return {k.name: k.aggregation_fn for k in self.metric_list}

    def higher_is_better(self) -> dict:
        return {k.name: k.higher_is_better for k in self.metric_list}

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Optional[str]:
        return getattr(self.config, "task", None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }
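    # acc_norm length-normalizes before argmax: with made-up lls (-9.0, -4.0) and
    # choice lengths (3, 2), argmax((-3.0, -2.0)) selects the second choice.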

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError("Perplexity tasks expect an empty context.")

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
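    # e.g. count_words("hello world\nfoo") == 3 under this default whitespace split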