import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import Any, Iterator, List, Literal, Tuple, Union

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.metrics import (
    bits_per_byte,
    mean,
    weighted_perplexity,
)
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    task_alias: str = None
    group: Union[str, list] = None
    group_alias: Union[str, list] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert it is not the same split being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str, dict, list] = None
    process_results: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: dict = None
    # runtime configuration options
    num_fewshot: int = None
    # scoring options
    metric_list: list = None
    output_type: Literal[
        "loglikelihood",
        "loglikelihood_rolling",
        "generate_until",
        "multiple_choice",
    ] = "generate_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None
    metadata: dict = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }

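    # Illustrative sketch (hypothetical values): with no explicit
    # `generation_kwargs`, `__post_init__` above fills in greedy defaults:
    #
    #     cfg = TaskConfig(task="my_task", output_type="generate_until")
    #     cfg.generation_kwargs
    #     # -> {"until": ["\n\n"], "do_sample": False}
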
    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
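
    # Illustrative sketch: with keep_callable=False (the default), `to_dict()`
    # routes callable fields through `serialize_function`, so a config such as
    #
    #     TaskConfig(task="t", doc_to_text=lambda doc: doc["question"])
    #
    # serializes `doc_to_text` to its source text via `getsource`, falling back
    # to `str(value)` when the source is unavailable (e.g. for C builtins).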


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION = None
    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None
    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]

    def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )
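
        # Illustrative sketch: a caller could force a fresh copy of the data,
        # e.g. self.download(download_mode=datasets.DownloadMode.FORCE_REDOWNLOAD)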
    @property
    def config(self):
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            A iterable of any object, that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)
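
    # Illustrative sketch: sampling is reproducible given an identically seeded
    # generator, e.g. task.fewshot_examples(k=5, rnd=random.Random(1234))
    # returns the same 5 training docs every time.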

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}"

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)
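
        # Illustrative sketch (hypothetical task): on a later run,
        #
        #     task.build_all_requests(limit=100, rank=0, world_size=1,
        #                             cache_requests=True)
        #
        # loads the "requests-<task>" pickle saved above, slices it to the
        # first 100 per-doc groups, flattens them, and skips context building.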

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
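
    # Worked example: "héllo wörld" is 11 characters but 13 UTF-8 bytes,
    # since é and ö each encode to two bytes:
    #
    #     Task.count_bytes("héllo wörld")  # -> 13
    #     Task.count_words("héllo wörld")  # -> 2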

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=random.Random(1234),
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `random.Random(1234)`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`"

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
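
    # Illustrative sketch (hypothetical docs): with description
    # "Answer the question." and num_fewshot=1 the returned context looks like
    #
    #     "Answer the question.Q: 2+2?A: 4\n\nQ: 3+3?"
    #
    # i.e. description + fewshot pairs joined by "\n\n" + the eval example;
    # note that no delimiter separates the description from the first shot.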

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)
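
    # Illustrative usage (hypothetical metric choice):
    #
    #     task.override_metric("exact_match")
    #
    # after which _metric_fn_list, _aggregation_list and _higher_is_better each
    # hold only the registry entries for "exact_match".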

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            assert False, f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
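
    # Illustrative sketch: docs are strided across ranks (assuming islice-style
    # striding in utils.create_iterator), so with world_size=2:
    #
    #     task.doc_iterator(rank=0, world_size=2)  # yields docs 0, 2, 4, ...
    #     task.doc_iterator(rank=1, world_size=2)  # yields docs 1, 3, 5, ...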


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            assert self.config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.get_sampler(
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )(list(self.fewshot_docs()), self, rnd=random.Random(1234))

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]

        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs=None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"Task '{self.config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self.config.description
        else:
            labeled_examples = self.config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)
        if self.multiple_input:
            return labeled_examples
        else:
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)
    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )
    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: dict) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            raise ValueError("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError
    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list
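            # Illustrative count: with 4 answer choices, acc_mutual_info doubles
            # the work to 8 loglikelihood requests for this doc (4 conditioned
            # on ctx + 4 unconditional), the ~2x runtime the TODO above notes.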

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)
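            # softmax turns the choice loglikelihoods into a probability vector,
            # which the Brier score below compares against the gold index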

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # if doc_to_choice is set, doc_to_target is assumed to return
                # an integer index into the list of choices
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # when multiple_target is set, we expect gold to be a list
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except TypeError:  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self) -> dict:
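        """Return the mapping from metric name to its aggregation function."""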
        return self._aggregation_list

    def higher_is_better(self) -> dict:
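        """Return the mapping from metric name to whether higher scores are better."""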
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
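        """Return the requested TaskConfig field, or None if it is unset."""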
        return getattr(self._config, key, None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)}, "
            f"group_name={getattr(self.config, 'group', None)}, "
            f"output_type={self.OUTPUT_TYPE}, "
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)}, "
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
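    """Task whose documents provide a fixed list of answer choices, each scored
    by loglikelihood.

    Docs are expected to supply a "choices" list of strings and a "gold" index
    into it, e.g. (illustrative only):

        {"choices": [" 3", " 4"], "gold": 1}
    """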
    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: List[Tuple[float, bool]]) -> dict:
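        """Compute accuracy ("acc") from the argmax loglikelihood, plus a
        character-length-normalized variant ("acc_norm")."""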
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
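    """Zero-shot rolling-loglikelihood task reporting word perplexity, byte
    perplexity, and bits per byte over whole documents."""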
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        assert k == 0
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Union[str, None], **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
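        """Return the length of the document text in UTF-8 bytes."""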
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))