import abc
import ast
import logging
import random
import re
import uuid
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class GroupConfig(dict):
    group: Optional[str] = None
    group_alias: Optional[str] = None
    task: Optional[Union[str, list]] = None
    aggregate_metric: Optional[bool] = False
    aggregate_fn: Optional[str] = "mean"
    weight_by_size: Optional[bool] = False
    metric_alias: Optional[str] = None
    version: Optional[Union[int, str]] = 0

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the GroupConfig object.

        # TODO: should any default value in the GroupConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
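
    # Illustrative sketch of the above: GroupConfig(group="demo").to_dict()
    # drops the None-valued fields, leaving {"group": "demo",
    # "aggregate_metric": False, "aggregate_fn": "mean",
    # "weight_by_size": False, "version": 0}.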


class ConfigurableGroup(abc.ABC):
    def __init__(
        self,
        config: Optional[dict] = None,
    ) -> None:
        # Create a unique identifier
        self._task_id = str(uuid.uuid1())
        self._config = GroupConfig(**config)

    @property
    def group(self):
        return self._config.group

    @property
    def group_alias(self):
        return self._config.group_alias

    @property
    def version(self):
        return self._config.version

    @property
    def config(self):
        return self._config.to_dict()

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableGroup(group={self.group}, group_alias={self.group_alias})"
        )


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    group_alias: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[
        str
    ] = None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert that this is not the same split being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[
        dict
    ] = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks
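
    # A minimal illustrative config; the field values below are hypothetical
    # rather than a task shipped with the library:
    #
    #     TaskConfig(
    #         task="demo_task",
    #         dataset_path="super_glue",
    #         dataset_name="boolq",
    #         output_type="multiple_choice",
    #         doc_to_text="{{question}}?",
    #         doc_to_choice=["no", "yes"],
    #         doc_to_target="label",
    #     )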

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # otherwise, ensure greedy generation in the absence of explicit arguments
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }
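
                # e.g. with the default fewshot_delimiter this yields
                # (illustrative): {"until": ["\n\n"], "do_sample": False},
                # i.e. greedy decoding that stops at the fewshot delimiter.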

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[
            random.Random
        ] = None  # purposely left as None so that improper usage raises an error

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
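        # e.g. "requests-demo_task-0shot-rank0-world_size1" (illustrative values)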

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten; this allows slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_all_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Takes a single document and the LM results, evaluates them, and
        returns a dict whose keys are the names of submetrics and whose values
        are the values of the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
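
    # e.g. (illustrative) count_bytes("héllo") == 6 UTF-8 bytes, while
    # count_words("a b  c") == 3 whitespace-delimited words.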

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
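
    # Illustrative sketch of the assembled context for num_fewshot=2, with
    # hypothetical Q/A-formatted docs:
    #
    #     <description>Q: q1\nA: a1\n\nQ: q2\nA: a2\n\nQ: q3\nA: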

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have validation or test docs!"
            )

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
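
    # Illustrative sketch, assuming utils.create_iterator strides by world_size:
    # with world_size=2, rank 0 yields (0, doc0), (2, doc2), ... while rank 1
    # yields (1, doc1), (3, doc3), ...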


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Create a unique identifier
        self._task_id = str(uuid.uuid1())

        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
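
    # Illustrative: `dataset_kwargs` is forwarded verbatim to
    # datasets.load_dataset, e.g. {"data_files": ..., "revision": ...}
    # (which keys make sense is dataset-specific).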

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc: str, num_fewshot: int) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = description
        else:
            labeled_examples = description + self.sampler.get_context(doc, num_fewshot)

        example = self.doc_to_text(doc)
        if self.multiple_input:
            return labeled_examples
        else:
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)
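
    # Illustrative sketch: for a typical single-input task the return value is
    #     description + fewshot examples + doc_to_text(doc)
    # For multiple_input tasks only the labeled examples are returned; each
    # answer choice is appended to the context later in construct_requests.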

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
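                # e.g. for choices ["yes", "no"], this appends the extra
                # requests ("", "yes") and ("", "no"), i.e. each choice scored
                # with an empty context.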
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):
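        """Takes a single document and the LM results for that document, and
        returns a dict mapping metric name to the raw per-sample value."""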
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Expected one unconditional loglikelihood per choice"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)
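            # acc ranks choices by raw loglikelihood; acc_norm first divides by
            # each choice's character length (completion_len)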

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If doc_to_choice is set, doc_to_target is assumed to return
                # an index into the choice list.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect gold to be a list when multiple_target is set.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)
            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multipLe_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        # print(gold)
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
1475
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'",
1476
            )

        return result_dict

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
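        """Returns the gold choice string, with a leading space so it forms a
        natural continuation of the context."""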
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]
    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
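        """Scores one document: accuracy of the argmax-loglikelihood choice
        (``acc``) and a character-length-normalized variant (``acc_norm``)."""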
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
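        """Perplexity tasks are always zero-shot, so the context is empty."""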
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
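        """Rolling loglikelihood requests take no few-shot context; the target
        text is scored on its own."""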
        if bool(ctx):
            raise ValueError("Perplexity tasks expect an empty context.")

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
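        """Returns the length of ``doc`` in UTF-8 bytes (used for byte-level perplexity)."""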
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))