import abc
import ast
import logging
import random
import re
import uuid
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class GroupConfig(dict):
    group: Optional[str] = None
    group_alias: Optional[str] = None
    task: Optional[Union[str, list]] = None
    tag_to_task: Optional[bool] = False
    aggregate_metric: Optional[bool] = False
    aggregate_fn: Optional[str] = "mean"
    weight_by_size: Optional[bool] = False
    metric_alias: Optional[str] = None
    version: Optional[Union[int, str]] = 0

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
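
# Illustrative GroupConfig usage (a sketch; the group and task names here are
# hypothetical):
#   cfg = GroupConfig(group="knowledge", task=["task_a", "task_b"])
#   cfg["group_alias"] = "Knowledge Tasks"  # dict-style access proxies to attributes
#   cfg.to_dict()  # None-valued fields are omitted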


class ConfigurableGroup(abc.ABC):
    def __init__(
        self,
        config: Optional[dict] = None,
    ) -> None:
        # Create a unique identifier for this group
        self._task_id = str(uuid.uuid1())
        self._config = GroupConfig(**config)

    @property
    def group(self):
        return self._config.group

    @property
    def group_alias(self):
        return self._config.group_alias

    @property
    def version(self):
        return self._config.version

    @property
    def config(self):
        return self._config.to_dict()

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableGroup(group={self.group}, group_alias={self.group_alias})"
        )


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    group_alias: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[
        str
    ] = None  # TODO: assert that this is not None if num_fewshot > 0; assert it is not the split being evaluated (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[
        dict
    ] = None  # by default, not used in the code. Allows users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside the full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
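
# An illustrative, minimal TaskConfig (a sketch; the task name, dataset path,
# and templates here are hypothetical, not a registered task):
#   config = TaskConfig(
#       task="my_task",
#       dataset_path="org/some_hf_dataset",
#       test_split="test",
#       doc_to_text="{{question}}",
#       doc_to_target="{{answer}}",
#       output_type="generate_until",
#   )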


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., question, answer)
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        # Create a unique identifier for this task
        self._task_id = str(uuid.uuid1())
        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[
            random.Random
        ] = None  # deliberately left None so that improper usage raises an error

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc
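
    # A typical override in a subclass might look like this (illustrative; the
    # "passage" and "answer" field names are hypothetical):
    #   def _process_doc(self, doc: dict) -> dict:
    #       doc["passage"] = doc["passage"].strip()
    #       doc["answer"] = str(doc["answer"])
    #       return doc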

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(ex) + self.doc_to_target(ex)
                        for ex in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
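
        # The assembled context has the shape (illustrative, with num_fewshot=2):
        #   <description><ex1 text><ex1 target>\n\n<ex2 text><ex2 target>\n\n<doc text>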
        return description + labeled_examples + example

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)
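
    # Illustrative usage (a sketch):
    #   task.set_config("num_fewshot", 5)
    #   task.set_config("generation_kwargs", {"temperature": 0.0}, update=True)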

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
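
    # Illustrative sharding (a sketch): with world_size=2, rank 0 yields
    # (0, doc0), (2, doc2), ... and rank 1 yields (1, doc1), (3, doc3), ...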

    @property
    def task_id(self) -> Any:
        return self._task_id


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Create a unique identifier for this task
        self._task_id = str(uuid.uuid1())

        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test one doc to infer the task's input/target shape
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
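
    # Illustrative YAML-side configuration feeding this call (a sketch; values
    # are hypothetical):
    #   dataset_path: org/some_hf_dataset
    #   dataset_kwargs:
    #     data_dir: data/subset
    #     trust_remote_code: true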

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc: str, num_fewshot: int) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = description
        else:
            labeled_examples = description + self.sampler.get_context(doc, num_fewshot)

        example = self.doc_to_text(doc)
        if self.multiple_input:
            return labeled_examples
        else:
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"Unexpected doc_to_text type: {type(doc_to_text)}"
            )

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"Unexpected doc_to_target type: {type(doc_to_target)}"
            )

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            raise ValueError("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError
1237

baberabb's avatar
baberabb committed
1238
1239
1240
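    # Illustrative config forms (assumed examples, not executed):
    #   doc_to_choice: "choices"              -> doc["choices"] (a dataset column)
    #   doc_to_choice: ["yes", "no"]          -> the fixed label set ["yes", "no"]
    #   doc_to_choice: {"A": "yes", "B": "no"} -> the dict's values, ["yes", "no"]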
    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )
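
    # Illustrative sketch (assumed values, not executed): for a multiple_choice doc
    # with ctx "Q: 2+2? A:", choices ["3", "4"], and target_delimiter " ", the
    # loglikelihood arguments are ("Q: 2+2? A:", " 3") and ("Q: 2+2? A:", " 4");
    # when acc_mutual_info is requested, the unconditional pairs ("", "3") and
    # ("", "4") are appended as extra requests.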

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
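            # note: these perplexity-style results are (loglikelihood, num_units)
            # pairs; the reduction to a single number is deferred to aggregation
            # (cf. PerplexityTask.aggregation below).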
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                # (results are assumed to arrive interleaved: cond_0, uncond_0, cond_1, ...)
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Number of unconditional loglikelihoods does not match the number of choices"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO: use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info
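                # e.g. (assumed values): conditional lls (-2.0, -1.0) with
                # unconditional lls (-1.0, -3.0) give lls_mutual_info = [-1.0, 2.0],
                # so the mutual-info prediction is choice index 1.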

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect the gold for multiple_target tasks to be a list.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) is not type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'."
            )

        return result_dict
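
    # Illustrative note (an assumption about the two metric interfaces seen above):
    # an HF Evaluate-style metric is called as fn(references=[gold],
    # predictions=[result]) and may return a dict like {"exact_match": 1.0}, while
    # a harness-native metric is called as fn([gold, result]) and returns a bare
    # float; the try/except TypeError above bridges the two.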

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }
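
    # Illustrative doc shape (assumed, not from the source): MultipleChoiceTask
    # expects docs like {"choices": ["no", "yes"], "gold": 1}, where "gold" is an
    # integer index into "choices".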


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError("The context must be empty for perplexity tasks.")

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }
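
    # Note (assumption; see weighted_perplexity and bits_per_byte in
    # lm_eval.api.metrics for the exact reductions): with (ll, units) pairs,
    # corpus perplexity is exp(-sum(ll) / sum(units)) and bits_per_byte is
    # -sum(ll) / (sum(bytes) * ln 2).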

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
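
    # Illustrative values (assumed, not executed): count_bytes("héllo") == 6,
    # since "é" encodes to two UTF-8 bytes; count_words("two words") == 2 from
    # the whitespace split.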