import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")

@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    group: Optional[Union[str, list]] = None
    group_alias: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[
        str
    ] = None  # TODO: assert that this is not None if num_fewshot > 0; assert it is not the same split being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[
        dict
    ] = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }
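                # e.g. (illustrative) with the default fewshot_delimiter "\n\n",
                # this yields {"until": ["\n\n"], "do_sample": False}.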

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.

        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
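
# Illustrative usage (hypothetical values; not executed anywhere in this module):
#
#   example_config = TaskConfig(
#       task="example_qa",
#       dataset_path="some_org/some_dataset",  # hypothetical HF dataset path
#       test_split="test",
#       output_type="generate_until",
#       doc_to_text="Question: {{question}}\nAnswer:",
#       doc_to_target="{{answer}}",
#       metric_list=[{"metric": "exact_match"}],
#   )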


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        a tuple such as (question, answer).
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[
            random.Random
        ] = None  # purposely induce errors in case of improper usage

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of objects that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
        apply_chat_template=False,
        tokenizer=None,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
                apply_chat_template,
                tokenizer,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten; this is to allow slicing to work with pickles
        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.

        TODO: update this docstring
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Takes a single document and the LM results, evaluates them, and returns
        a dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
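
    # Illustrative: count_bytes("héllo") == 6 while count_words("héllo") == 1,
    # since "é" occupies two bytes in UTF-8.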

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)
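
    # Illustrative usage: set_config("num_fewshot", 5) overwrites the value, while
    # set_config("generation_kwargs", {"temperature": 0.0}, update=True) merges the
    # new entries into the existing dict.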

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)
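
    # Illustrative usage: task.override_metric("exact_match") discards the
    # configured metrics and scores with exact_match plus its default aggregation.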

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
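
    # Illustrative sharding: with world_size=2, rank 0 yields (0, doc0), (2, doc2), ...
    # and rank 1 yields (1, doc1), (3, doc3), ..., interleaving documents across ranks.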


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO: no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )
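            # e.g. (illustrative, hypothetical sampler name) a YAML task with
            # `fewshot_config: {sampler: first_n}` would select a registered
            # sampler by name here instead of the default.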

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]

        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"Task '{self.config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "Using the default rule: drawing fewshot examples from the "
                    "training split if available, otherwise validation, otherwise test."
                )
            return super().fewshot_docs()

    def convert_chat_history_to_string(self, chat_history: list, tokenizer=None) -> str:
        """Returns chat history tokenized or concatenated as a string.

        :param chat_history: list
            The chat history to convert to a string.
        :param tokenizer:
            Optional tokenizer to use for applying the chat template; if None, the sampler's fewshot_delimiter is used.
        """
        if tokenizer:
            return tokenizer.apply_chat_template(
                chat_history, tokenize=False, add_generation_prompt=True
            )
        else:
            return self.sampler.fewshot_delimiter + "".join(
                f"{s['role']}: {s['content']}" + self.sampler.fewshot_delimiter
                for s in chat_history
            )
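
    # Illustrative: without a tokenizer, a history of
    #   [{"role": "user", "content": "Q"}, {"role": "assistant", "content": "A"}]
    # renders as "\n\nuser: Q\n\nassistant: A\n\n" under the default "\n\n" delimiter.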

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: str,
        num_fewshot: int,
        apply_chat_template: bool = False,
        tokenizer=None,
    ) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param tokenizer:
            The tokenizer to use for applying the chat template.
        :returns: str
            The fewshot context.
        """
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        chat_history = []
        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            if apply_chat_template:
                chat_history.append({"role": "system", "content": description})
            else:
                labeled_examples = description
        else:
            if apply_chat_template:
                chat_history = self.sampler.get_chat_context(
                    doc, num_fewshot, chat_history
                )
            else:
                labeled_examples = description + self.sampler.get_context(
                    doc, num_fewshot
                )

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if not self.multiple_input:
                if isinstance(example, str):
                    chat_history.append({"role": "user", "content": example})
                elif isinstance(example, list):
                    chat_histories_list = []
                    for ex in example:
                        chat = deepcopy(chat_history)
                        chat.append({"role": "user", "content": ex})
                        chat_histories_list.append(
                            self.convert_chat_history_to_string(chat, tokenizer)
                        )
                    return chat_histories_list
                elif isinstance(example, int):
                    if self.config.doc_to_choice is not None:
                        choices = self.doc_to_choice(doc)
                        chat_history.append(
                            {"role": "user", "content": choices[example]}
                        )
                    else:
                        chat_history.append({"role": "user", "content": str(example)})
            return self.convert_chat_history_to_string(chat_history, tokenizer)
        else:
            if self.multiple_input:
                return labeled_examples
            else:
                if isinstance(example, str):
                    return labeled_examples + example
                elif isinstance(example, list):
                    return [labeled_examples + ex for ex in example]
                elif isinstance(example, int):
                    if self.config.doc_to_choice is not None:
                        choices = self.doc_to_choice(doc)
                        return labeled_examples + choices[example]
                    else:
                        return labeled_examples + str(example)

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
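                    # e.g. (illustrative) '["yes", "no"]' parses into a list of
                    # reference targets; malformed literals fall through unchanged.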
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
1223

1224
            request_list = [
1225
1226
                Instance(
                    request_type="loglikelihood",
lintangsutawika's avatar
lintangsutawika committed
1227
                    doc=doc,
1228
                    arguments=arg,
1229
                    idx=i,
1230
1231
                    **kwargs,
                )
1232
                for i, arg in enumerate(arguments)
1233
            ]
1234
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
1235
            if "acc_mutual_info" in self._metric_fn_list.keys():
1236
1237
1238
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

lintangsutawika's avatar
lintangsutawika committed
1239
                # here mutual info refers to calculating
1240
1241
1242
1243
1244
1245
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
lintangsutawika's avatar
lintangsutawika committed
1246
                            doc=doc,
1247
                            arguments=("", "{}".format(choice)),
1248
1249
1250
                            idx=i,
                            **kwargs,
                        )
lintangsutawika's avatar
lintangsutawika committed
1251
                        for i, choice in enumerate(choices)
1252
1253
1254
                    ]
                )
            return request_list
lintangsutawika's avatar
lintangsutawika committed
1255

1256
        elif self.OUTPUT_TYPE == "generate_until":
1257
            arguments = (ctx, deepcopy(self.config.generation_kwargs))
lintangsutawika's avatar
lintangsutawika committed
1258
1259

        return Instance(
lintangsutawika's avatar
lintangsutawika committed
1260
1261
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):
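        """Convert raw model outputs for ``doc`` into a dict of metric values.

        The shape of ``results`` depends on ``OUTPUT_TYPE``: ``(loglikelihood,
        is_greedy)`` pairs for loglikelihood-style requests, or generated
        strings for ``generate_until``. A user-supplied
        ``config.process_results`` takes precedence over the built-in scoring.
        """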
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Expected one unconditional loglikelihood per choice"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_target to be a list.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until', or 'multiple_choice'."
            )

        return result_dict

    def aggregation(self) -> dict:
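        """Return a mapping from metric name to its aggregation function."""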
        return self._aggregation_list

    def higher_is_better(self) -> dict:
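        """Return a mapping from metric name to whether higher values are better."""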
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
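        """Return a single field of the task config, or None if it is unset."""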
        return getattr(self._config, key, None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)}, "
            f"group_name={getattr(self.config, 'group', None)}, "
            f"output_type={self.OUTPUT_TYPE}, "
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)}, "
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
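    """Generic multiple-choice task: each doc provides a ``choices`` list and a
    ``gold`` answer index, and scoring compares per-choice loglikelihoods."""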
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
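        """Score one document: ``acc`` selects the highest-loglikelihood choice,
        while ``acc_norm`` first normalizes each loglikelihood by the choice's
        length in characters to reduce the bias toward short answers."""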
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
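    """Zero-shot task that scores a rolling loglikelihood over each full
    document, reported as word/byte-level perplexity and bits-per-byte."""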
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError(
                "Perplexity tasks expect an empty context; got a non-empty ctx."
            )

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
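        """Pair the document's total loglikelihood with its word and byte
        counts; the weighted aggregations defined below turn these pairs into
        corpus-level perplexity and bits-per-byte."""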
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
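        """Return the length of the document in UTF-8 bytes."""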
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))