import abc
import ast
import logging
import random
import re
import uuid
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class GroupConfig(dict):
    group: Optional[str] = None
    group_alias: Optional[str] = None
    task: Optional[Union[str, list]] = None
    aggregate_metric: Optional[bool] = False
    aggregate_fn: Optional[str] = "mean"
    weight_by_size: Optional[bool] = False
    metric_alias: Optional[str] = None
    version: Optional[Union[int, str]] = 0

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, as a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the GroupConfig object.

        # TODO: should any default value in the GroupConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
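
    # A minimal sketch of the dict-style access and serialization above
    # (hypothetical values; not executed at import time):
    #
    #   cfg = GroupConfig(group="my_group", aggregate_metric=True)
    #   cfg["group"]              # -> "my_group", routed through __getitem__/getattr
    #   cfg["group_alias"] = "My Group"
    #   cfg.to_dict()             # drops None fields; callables dumped via getsource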


class ConfigurableGroup(abc.ABC):
    def __init__(
        self,
        config: Optional[dict] = None,
    ) -> None:
        # Create a unique identifier for this group
        self._task_id = str(uuid.uuid1())
        self._config = GroupConfig(**config)

    @property
    def group(self):
        return self._config.group

    @property
    def group_alias(self):
        return self._config.group_alias

    @property
    def version(self):
        return self._config.version

    @property
    def config(self):
        return self._config.to_dict()

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableGroup(group={self.group}, group_alias={self.group_alias})"
        )


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    group_alias: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[
        str
    ] = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert if this is the same split as the one being evaluated (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[
        dict
    ] = None  # by default, not used in the code. allows users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }
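
    # A minimal sketch of the defaults applied above (hypothetical task name;
    # not executed at import time):
    #
    #   cfg = TaskConfig(task="my_task", output_type="generate_until")
    #   assert cfg.generation_kwargs == {"until": ["\n\n"], "do_sample": False}
    #
    # i.e. generation defaults to greedy decoding that stops at the fewshot
    # delimiter, unless explicit `generation_kwargs` are supplied.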

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, as a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
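
    # A minimal sketch of how callables survive `to_dict` (hypothetical
    # `my_metric_fn`; not executed at import time):
    #
    #   cfg = TaskConfig(task="t", metric_list=[{"metric": my_metric_fn}])
    #   cfg.to_dict()["metric_list"]
    #   # -> [{"metric": <source of my_metric_fn via getsource>}]
    #   # with keep_callable=True, the original function object is kept instead.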


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        # Create a unique identifier for this task
        self._task_id = str(uuid.uuid1())

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[
            random.Random
        ] = None  # purposely induce errors in case of improper usage

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that `doc_to_text` can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that `doc_to_text` can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that `doc_to_text` can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that `doc_to_text` can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)
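
        # A minimal sketch of the caching flow above (hypothetical key; not
        # executed):
        #
        #   cache_key == "requests-mytask-5shot-rank0-world_size1"
        #
        # first run: instances are built from docs and written via save_to_cache;
        # later runs with cache_requests=True load them via load_from_cache,
        # slice to `limit`, flatten, and assign to self._instances.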

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
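
    # A minimal sketch of how these counts feed perplexity normalization
    # (hypothetical text; not executed):
    #
    #   Task.count_bytes("héllo")   # -> 6 (UTF-8 bytes, not characters)
    #   Task.count_words("a b\tc")  # -> 3 (whitespace-delimited tokens)
    #
    # word- and byte-level perplexities divide summed loglikelihoods by these
    # totals (cf. `weighted_perplexity` and `bits_per_byte` imported above).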

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
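
        # A minimal sketch of the string assembled above (hypothetical docs;
        # not executed):
        #
        #   description
        #   + doc_to_text(ex1) + doc_to_target(ex1) + "\n\n"
        #   + doc_to_text(ex2) + doc_to_target(ex2) + "\n\n"
        #   + doc_to_text(doc)  # the unanswered prompt for the evaluated doc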

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)
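
    # A minimal sketch of `set_config` semantics (hypothetical keys; not
    # executed):
    #
    #   task.set_config("num_fewshot", 5)  # plain setattr on the config
    #   task.set_config("generation_kwargs", {"top_p": 0.9}, update=True)
    #   # update=True merges into an existing dict value and raises TypeError
    #   # if the current value is not a dict.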

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
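
        # A minimal sketch of rank-sharded iteration, assuming
        # `utils.create_iterator` strides by `world_size` (hypothetical sizes;
        # not executed):
        #
        #   task.doc_iterator(rank=0, world_size=2)  # (0, doc0), (2, doc2), ...
        #   task.doc_iterator(rank=1, world_size=2)  # (1, doc1), (3, doc3), ...
        #
        # each rank sees its own shard of (doc_id, doc) pairs, optionally
        # truncated by `limit`.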

    @property
    def task_id(self) -> Any:
        return self._task_id


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Create a unique identifier for this task
        self._task_id = str(uuid.uuid1())

        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc: str, num_fewshot: int) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = description
        else:
            labeled_examples = description + self.sampler.get_context(doc, num_fewshot)

        example = self.doc_to_text(doc)
        if self.multiple_input:
            return labeled_examples
        else:
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError
1235

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
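        """Build the LM request(s) for a single document.

        ``multiple_choice`` produces one ``loglikelihood`` Instance per answer
        choice (plus one unconditional Instance per choice when
        ``acc_mutual_info`` is requested); every other output type produces a
        single Instance.
        """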
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):
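        """Map the raw model outputs for ``doc`` onto per-metric inputs.

        For example, under ``multiple_choice``, ``results`` is one
        ``(loglikelihood, is_greedy)`` pair per request built by
        ``construct_requests``, and the returned dict holds one entry per
        requested metric.
        """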
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Expected one unconditional loglikelihood per choice"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    f"Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
1411
            gold = self.doc_to_target(doc)
Chris's avatar
Chris committed
1412
            result = results[0]
1413
            if self.config.doc_to_choice is not None:
lintangsutawika's avatar
lintangsutawika committed
1414
                # If you set doc_to_choice,
lintangsutawika's avatar
lintangsutawika committed
1415
                # it assumes that doc_to_target returns a number.
1416
1417
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
1418
1419
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
baberabb's avatar
baberabb committed
1420
                gold = list(gold)
Chris's avatar
Chris committed
1421
1422
1423
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)
1424

lintangsutawika's avatar
lintangsutawika committed
1425
            for metric in self._metric_fn_list.keys():
haileyschoelkopf's avatar
haileyschoelkopf committed
1426
1427
1428
1429
1430
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multipLe_target, non zero-or-1 metrics
                    scores = []
haileyschoelkopf's avatar
haileyschoelkopf committed
1431
1432
1433
1434
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        # print(gold)
                        gold = [gold]
1435
1436
1437
1438
1439
1440
1441
1442
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
haileyschoelkopf's avatar
haileyschoelkopf committed
1443
                    else:
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
haileyschoelkopf's avatar
haileyschoelkopf committed
1465
                else:
1466
                    try:
1467
                        result_score = self._metric_fn_list[metric](
1468
1469
                            references=[gold],
                            predictions=[result],
1470
                            **self._metric_fn_kwargs[metric],
1471
                        )
1472
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
1473
                        result_score = self._metric_fn_list[metric]([gold, result])
1474
1475
1476
1477
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
1478
        else:
lintangsutawika's avatar
lintangsutawika committed
1479
1480
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
1481
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'",
1482
            )
1483
1484
1485

        return result_dict

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
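    """Task scored by comparing per-choice loglikelihoods: one
    ``loglikelihood`` request is issued per entry in ``doc["choices"]``."""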
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        # only retain loglikelihoods; discard is_greedy
        # TODO: do we need is_greedy anywhere else?
        results = [res[0] for res in results]
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
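    """Task scored by rolling loglikelihood over the raw document text, with
    no prompt, context, or fewshot examples."""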
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
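        """The full document is used as the decontamination query."""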
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
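        """The target of a perplexity task is the document itself."""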
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
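        """Build a single rolling-loglikelihood request over the target text;
        ``ctx`` must be empty for perplexity tasks."""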
        if bool(ctx):
            raise ValueError("Perplexity tasks expect an empty context.")

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
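        """Pair the document loglikelihood with its word and byte counts, the
        inputs expected by the weighted perplexity and bits-per-byte
        aggregations."""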
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
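        """Return the length of ``doc`` in UTF-8 bytes; e.g.
        ``count_bytes("héllo")`` is 6, since ``é`` encodes to two bytes."""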
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))