"vscode:/vscode.git/clone" did not exist on "b83cd632f8f17e5d0a02d07a0f1a7e9f1f9b0d12"
task.py 60.3 KB
Newer Older
import abc
import ast
import logging
import random
import re
import uuid
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class GroupConfig(dict):
    group: Optional[str] = None
    group_alias: Optional[str] = None
    task: Optional[Union[str, list]] = None
    aggregate_metric: Optional[bool] = False
    aggregate_fn: Optional[str] = "mean"
    weight_by_size: Optional[bool] = False
    metric_alias: Optional[str] = None
    version: Optional[Union[int, str]] = 0

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
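
# Example usage (illustrative; values are hypothetical): GroupConfig is the
# dict-like config consumed by ConfigurableGroup below.
#
#   cfg = GroupConfig(group="my_group", task=["task_a", "task_b"])
#   cfg.to_dict()  # None-valued fields are dropped from the dump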


class ConfigurableGroup(abc.ABC):
    def __init__(
        self,
        config: Optional[dict] = None,
    ) -> None:
        # Create a unique identifier ID
        self._task_id = str(uuid.uuid1())
        self._config = GroupConfig(**config)

    @property
    def group(self):
        return self._config.group

    @property
    def group_alias(self):
        return self._config.group_alias

    @property
    def version(self):
        return self._config.version

    @property
    def config(self):
        return self._config.to_dict()

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableGroup(group={self.group},"
            f"group_alias={self.group_alias})"
        )


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    group_alias: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[
        str
    ] = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert if this is the same split as the one being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[
        dict
    ] = None  # by default, not used in the code. allows users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside the full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
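
# Example usage (illustrative; dataset path and field names are hypothetical):
# a minimal TaskConfig for a generation task. __post_init__ fills in greedy
# generation defaults when generation_kwargs is omitted.
#
#   cfg = TaskConfig(
#       task="my_task",
#       dataset_path="some/dataset",
#       test_split="test",
#       doc_to_text="{{question}}",
#       doc_to_target="{{answer}}",
#       output_type="generate_until",
#   )
#   cfg.generation_kwargs  # -> {"until": ["\n\n"], "do_sample": False}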


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        self.fewshot_rnd: Optional[
            random.Random
        ] = None  # purposely induce errors in case of improper usage

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.

        TODO: update this docstring
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
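
    # Illustrative sketch of the two counting conventions:
    #   Task.count_bytes("héllo")        # -> 6 (UTF-8 bytes; "é" is 2 bytes)
    #   Task.count_words("hello world")  # -> 2 (whitespace-delimited words)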

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
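
    # Illustrative sketch of the assembled string (contents hypothetical):
    #   <description><doc_1 text><doc_1 target>\n\n...<doc_k text><doc_k target>\n\n<eval doc text>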

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)
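
    # Example usage (illustrative): replace a scalar field, or merge into a
    # dict-valued field with update=True.
    #   task.set_config("num_fewshot", 5)
    #   task.set_config("generation_kwargs", {"temperature": 0.0}, update=True)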

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
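
    # Example usage (illustrative): documents are sharded round-robin across
    # ranks, so with world_size=2, rank 0 sees doc_ids 0, 2, 4, ...
    #   for doc_id, doc in task.doc_iterator(rank=0, world_size=2):
    #       ...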


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Create a unique identifier ID
        self._task_id = str(uuid.uuid1())

        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
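
    # Note (illustrative): dataset_kwargs is forwarded verbatim to
    # datasets.load_dataset, so task YAMLs can pass e.g.
    # {"data_files": {...}} or {"trust_remote_code": True}.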

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc: str, num_fewshot: int) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = description
        else:
            labeled_examples = description + self.sampler.get_context(doc, num_fewshot)

        example = self.doc_to_text(doc)
        if self.multiple_input:
            return labeled_examples
        else:
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")
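
    # Example forms (illustrative) that doc_to_choice may take in a config:
    #   "choices"               -> doc["choices"] (a dataset column)
    #   "{{['yes', 'no']}}"     -> Jinja template, parsed with ast.literal_eval
    #   ["yes", "no"]           -> returned as-is
    #   {"A": "yes", "B": "no"} -> the dict's values, as a list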

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy using mutual
                # information instead of raw loglikelihood as the metric, we
                # need unconditional loglikelihoods as well.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )
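
    # Illustrative example (hypothetical task): for a multiple_choice doc with
    # choices ["yes", "no"] and target_delimiter " ", this builds two
    # loglikelihood Instances with arguments (ctx, " yes") and (ctx, " no").
    # If acc_mutual_info is requested, two more Instances with arguments
    # ("", "yes") and ("", "no") are appended, so each choice is also scored
    # unconditionally (hence the ~2x runtime noted above).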

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Expected one unconditional loglikelihood per choice"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    f"Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If doc_to_choice is set,
                # doc_to_target is assumed to return a choice index.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect gold to be a list of targets when multiple_target is set.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        result_score = 1.0 if any(scores) else 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until', or 'multiple_choice'"
            )

        return result_dict
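
    # Worked example (illustrative numbers, hypothetical two-choice doc):
    # with choices ["yes", "no"], conditional lls = (-1.2, -2.3) and gold = 0:
    #   pred = argmax(lls) = 0, so acc = 1.0;
    #   completion_len = [3.0, 2.0] gives lls / completion_len = (-0.4, -1.15),
    #   so pred_norm = 0 and acc_norm = 1.0.
    # For acc_mutual_info with unconditional lls = (-2.0, -2.1),
    # lls_mutual_info = (0.8, -0.2) and the prediction is again choice 0.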

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_id(self) -> Any:
        return self._task_id

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError("Perplexity tasks expect an empty context")

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }
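
    # Illustrative example: for a doc of 10 words and 50 bytes with total
    # loglikelihood -120.0, this returns
    # {"word_perplexity": (-120.0, 10), "byte_perplexity": (-120.0, 50),
    #  "bits_per_byte": (-120.0, 50)}; the (loglikelihood, count) pairs are
    # combined corpus-wide by the aggregation functions below.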

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
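
    # Illustrative example: count_words("hello world") == 2 and
    # count_bytes("hello world") == 11. Note that re.split(r"\s+", ...) also
    # counts empty strings produced by leading or trailing whitespace.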