import abc
from dataclasses import dataclass, field, asdict

import re
import ast
import yaml
import evaluate
import random
import itertools
import functools
from tqdm import tqdm

import datasets
import numpy as np

from typing import Union, List, Any, Tuple, Literal
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble
from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble
from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
    bits_per_byte,
    metric_max_over_ground_truths,
)
from lm_eval.api.registry import (
    get_metric,
    get_aggregation,
    get_default_aggregation,
    is_higher_better,
    DEFAULT_METRIC_REGISTRY,
    OUTPUT_TYPE_REGISTRY,
    AGGREGATION_REGISTRY,
)


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "greedy_until",
]

@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    group: Union[str, list] = None

    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert if this is the same split as the one being evaluated on (?)

    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str, dict, list] = None
    gold_alias: Union[Callable, str] = None
    process_results: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: dict = None

    # runtime configuration options
    num_fewshot: int = 0

    # scoring options
    metric_list: list = None
    output_type: str = "greedy_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.dataset_path and "." in self.dataset_path:
            import inspect
            from importlib import import_module

            self.dataset_path = inspect.getfile(import_module(self.dataset_path))

        if self.generation_kwargs is not None:
            if self.output_type != "greedy_until":
                eval_logger.warning(
                    "passed `generation_kwargs`, but not using `output_type: greedy_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "greedy_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": None
                    if self.fewshot_delimiter is None
                    else [self.fewshot_delimiter],
                    "do_sample": False,
                }

        # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?
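
    # Illustrative (not part of the API): for a "greedy_until" task constructed
    # with no explicit generation_kwargs, __post_init__ above leaves
    #     generation_kwargs == {"until": ["\n\n"], "do_sample": False}
    # given the default fewshot_delimiter of "\n\n".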

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self):
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                # TODO: this should handle Promptsource template objects as a separate case?
                cfg_dict[k] = str(v)
        return cfg_dict
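

# Illustrative only: a minimal TaskConfig as it might be built from a YAML task
# file. Field names match the dataclass above; the dataset path, templates, and
# metric shown here are hypothetical.
#
#     config = TaskConfig(
#         task="demo_multiple_choice",
#         dataset_path="some_org/some_dataset",  # hypothetical HF dataset
#         test_split="test",
#         output_type="multiple_choice",
#         doc_to_text="{{question}}",
#         doc_to_choice="{{choices}}",
#         doc_to_target="{{answer}}",
#         metric_list=[{"metric": "acc"}],
#     )
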
class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...}
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig(**config) if config else TaskConfig()

        if not hasattr(self, "_filters"):
            self._filters = []
            for name, components in self._config.get(
                "filters", [["none", [["take_first", None]]]]
            ):
                filter_pipeline = build_filter_ensemble(name, components)
                self._filters.append(filter_pipeline)

        self.sampler = samplers.Sampler(
            list(self.fewshot_docs()), self, rnd=random.Random(1234)
        )

    def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self):
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc) -> None:
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        eval_logger.info(
            f"Building contexts for task '{self.config.task}' on rank {rank}..."
        )

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
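
    # Example (illustrative): count_bytes("héllo wörld") == 13 UTF-8 bytes,
    # while count_words("héllo wörld") == 2 whitespace-delimited words.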

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self.config.description
        else:
            labeled_examples = self.config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)
        if type(example) == str:
            return labeled_examples + example
        elif type(example) == list:
            return [labeled_examples + ex for ex in example]
        elif type(example) == int:
            if self.config.doc_to_choice is not None:
                choices = self.doc_to_choice(doc)
                return labeled_examples + choices[example]
            else:
                return labeled_examples + str(example)
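
    # Illustrative layout of the returned context for a hypothetical task with a
    # one-line description, num_fewshot=1, and the default delimiters:
    #
    #     "<description><fewshot q> <fewshot a>\n\n<eval question>"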

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns a dictionary representing the task's config.

        :returns: dict
            A dictionary of this task's configuration.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if self.config.output_type is not None:
            assert self.config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]
        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._aggregation_list[metric_name] = get_default_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key not in ["metric", "aggregation", "higher_is_better"]
                }

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(metric_name)
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if type(agg_name) == str:
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_default_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self._config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self._config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]
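
        # Illustrative shape of a filter_list entry, matching the parsing above
        # (Python equivalent of a task YAML block; names here are examples):
        #
        #     filter_list = [
        #         {
        #             "name": "get-answer",
        #             "filter": [
        #                 {"function": "regex", "regex_pattern": "(-?[0-9]+)"},
        #                 {"function": "take_first"},
        #             ],
        #         }
        #     ]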

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.get_sampler(
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )(list(self.fewshot_docs()), self, rnd=random.Random(1234))

        if self.has_test_docs():
            self.task_docs = self.test_docs()
        elif self.has_validation_docs():
            self.task_docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if type(test_choice) is not list:
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if type(test_text) is int:
                self.multiple_input = num_choice
        else:
            test_choice = None

        if type(test_target) is list:
            self.multiple_target = len(test_target)
        else:
            if (type(test_target) is int) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = self.config.target_delimiter[-1].isspace()

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.warning(
                        f'Both target_delimiter and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.warning(
                        f'Neither target_delimiter nor target choice: "{choice}" has whitespace; ignore this if the language you are evaluating does not use whitespace'
                    )

    def download(self, dataset_kwargs=None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            return self.dataset[self.config.fewshot_split]
        else:
            if self.config.num_fewshot > 0:
                eval_logger.warning(
                    f"Task '{self.config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances, self.task_docs)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query in self.features:
                return doc[self.config.doc_to_decontamination_query]
            else:
                return ast.literal_eval(
                    utils.apply_template(self.config.doc_to_decontamination_query, doc)
                )

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if type(doc_to_text) == int:
            return doc_to_text
        elif type(doc_to_text) == str:
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected type for doc_to_text: {type(doc_to_text)}")

    def doc_to_target(self, doc: dict) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if type(doc_to_target) == int:
            return doc_to_target
        elif type(doc_to_target) == str:
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif type(doc_to_target) == list:
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected type for doc_to_target: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            raise ValueError("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if type(doc_to_choice) == str:
            return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif type(doc_to_choice) == list:
            return doc_to_choice
        elif type(doc_to_choice) == dict:
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected type for doc_to_choice: {type(doc_to_choice)}")
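
    # Accepted doc_to_choice forms, per the dispatch above: a template string
    # evaluating to a list (e.g. "{{choices}}"), a fixed list like ["yes", "no"],
    # a dict whose values are the choices, a callable returning a list, or a
    # Promptsource-style template exposing get_answer_choices_list.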

    def gold_alias(self, doc):
        # returns a version of the gold target answer to a document,
        # which should be passed into metric for scoring as the ground truth.

        # in multiple_choice tasks, this should be castable to an int corresponding to the index
        # within the answer choices, while doc_to_target is the string version of {{answer_choices[gold]}}.
        if self.config.gold_alias is not None:
            doc_to_target = self.config.gold_alias
        else:
            return self.doc_to_target(doc)

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [(ctx, f"{target_delimiter}{cont}") for ctx in choices]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "greedy_until":
            arguments = (ctx, self.config.generation_kwargs)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )
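
    # Illustrative: for a multiple_choice doc with choices ["yes", "no"] and the
    # default target_delimiter " ", the loglikelihood argument pairs built above
    # are (ctx, " yes") and (ctx, " no"), one Instance per choice.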

    def process_results(self, doc, results):

        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if type(gold) is list:
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if type(gold) is int:
                    gold = gold if gold < len(choices) else -100
                elif type(gold) is str:
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "greedy_until":
            gold = self.doc_to_target(doc)
            if self.config.doc_to_choice is not None:
                # If doc_to_choice is set, doc_to_target is assumed to
                # return an index into the choice list.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            else:
                gold = str(gold)

            result = results[0]
            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    for gold_option in gold:
                        try:
                            result_score = self._metric_fn_list[metric](
                                references=[gold_option],
                                predictions=[result],
                                **self._metric_fn_kwargs[metric],
                            )
                        except TypeError:  # TODO: this is hacky and I don't want to do it
                            result_score = self._metric_fn_list[metric](
                                [gold_option, result]
                            )
                        if isinstance(result_score, dict):
                            # TODO: this handles the case where HF evaluate returns a dict.
                            result_score = result_score[metric]
                        scores.append(result_score)
                    if any(scores):
                        result_score = 1.0
                    else:
                        result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'greedy_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):
    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: List[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        assert k == 0
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Union[str, None], **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: List[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }
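
    # Note (for orientation; see lm_eval.api.metrics for the definitions used
    # here): weighted_perplexity aggregates (loglikelihood, weight) pairs as
    # roughly exp(-sum(lls) / sum(weights)), and bits_per_byte as
    # -sum(lls) / (sum(num_bytes) * ln(2)).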

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))