import abc
from dataclasses import dataclass

import re
import ast
import yaml
import evaluate
import random
import itertools
import functools

import datasets
import numpy as np

from typing import Union
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble
from lm_eval.api.metrics import (
    METRIC_REGISTRY,
    AGGREGATION_REGISTRY,
    HIGHER_IS_BETTER_REGISTRY,
    get_metric,
    get_aggregation,
    mean,
    weighted_perplexity,
    bits_per_byte,
)

from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble

ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "greedy_until",
]


@dataclass
class TaskConfig(dict):

    task: str = None
    group: str = None
    names: Union[str, list] = None
    reference: str = None
    task_name: str = (
        None  # TODO: deprecate this, it'll be set in __post_init__ to be names[0]
    )
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert it is not the same split as the one being evaluated (?)

    template_aliases: str = None
    aliases: Union[str, list] = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None

    num_fewshot: int = 0
    batch_size: int = 1
    repeats: int = 1

    metric_list: str = None
    gold_alias: str = None
    output_type: str = "greedy_until"
    delimiter: str = "\n\n"
    filter_list: Union[str, list] = None
    normalization: str = (
        None  # TODO: add length-normalization of various types, mutual info
    )
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None
    use_prompt: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self):
        # allow user-specified aliases so that users can
        # force prompt-compatibility for some prompt regardless of
        # field names in prompt
        if self.template_aliases is not None:
            if isinstance(self.doc_to_text, str):
                self.doc_to_text = self.template_aliases + self.doc_to_text

            if isinstance(self.doc_to_target, str):
                self.doc_to_target = self.template_aliases + self.doc_to_target

        # set "task_name" metadata field based on the "primary" name set
        if self.names:
            self.task_name = self.names[0]

    def __getitem__(self, item):
        return getattr(self, item)
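
# Minimal usage sketch (illustrative; the YAML path and field values are
# hypothetical). A TaskConfig is typically built from a task YAML file:
#
#   with open("my_task.yaml") as f:
#       config = yaml.safe_load(f)
#   task_config = TaskConfig(**config)
#   task_config.task_name  # primary name, set in __post_init__ from names[0]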


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ):
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig(**config) if config else TaskConfig()

        if not hasattr(self, "_filters"):
            self._filters = []
            for name, components in self._config.get(
                "filters", [["none", [["take_first", None]]]]
            ):
                filter_pipeline = build_filter_ensemble(name, components)
                self._filters.append(filter_pipeline)

        self.sampler = samplers.Sampler(
            list(self.fewshot_docs()), self, rnd=random.Random()
        )  # TODO: pass the correct docs in here

    def download(self, data_dir=None, cache_dir=None, download_mode=None):
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False, "
                "using test_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None):
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc, self._config.num_fewshot, rnd=random.Random()
            )
            # TODO: hardcoded for now: # of runs on each input to be 2. # TODO: we should override this if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self._config["task_name"], doc_id, self._config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_all_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
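
    # Worked example (illustrative): for doc = "héllo world",
    # count_bytes(doc) == 12 ("é" is two bytes in UTF-8), while
    # count_words(doc) == 2.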

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :returns: str
            The fewshot context.
        """
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`"

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            labeled_examples = self.sampler.get_context(doc, num_fewshot)

            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            # if self.has_training_docs():
            #     fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            # else:
            #     if self._fewshot_docs is None:
            #         self._fewshot_docs = list(
            #             self.validation_docs()
            #             if self.has_validation_docs()
            #             else self.test_docs()
            #         )

            #     fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

            #     # get rid of the doc that's the one we're evaluating, if it's in the fewshot
            #     fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            # labeled_examples = (
            #     "\n\n".join(
            #         [
            #             self.doc_to_text(doc) + self.doc_to_target(doc)
            #             for doc in fewshotex
            #         ]
            #     )
            #     + "\n\n"
            # )

        example = self.doc_to_text(doc)
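        # The assembled context looks like, schematically (assuming the default
        # "\n\n" example delimiter):
        #   "<ex_1 text><ex_1 target>\n\n...<ex_k text><ex_k target>\n\n<doc text>"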
        return labeled_examples + example

    def apply_filters(self):

        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances


class ConfigurableTask(Task):

    VERSION = "2.0"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ):
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self._config is None:
            if config is None:
                raise ValueError(
                    "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
                )
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self._config.output_type is not None:
            assert self._config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self._config.output_type

        if self._config.dataset_path is not None:
            self.DATASET_PATH = self._config.dataset_path

        if self._config.dataset_name is not None:
            self.DATASET_NAME = self._config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self._config.metric_list is None:
            eval_logger.warning(
                f"Output Type set as {self._config.output_type} and metric_list is not set"
                "Will default to exact_match"
            )
            _metric_list = METRIC_REGISTRY[self._config.output_type]
            for metric_name, metric_params in _metric_list.items():
                self._metric_fn_list[metric_name] = metric_params["fn"]
                self._aggregation_list[metric_name] = metric_params["aggregation"]
                self._higher_is_better[metric_name] = metric_params["higher_is_better"]
        else:
            _metric_list = METRIC_REGISTRY[self._config.output_type]
            for metric_config in self._config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key not in ["metric", "aggregation", "higher_is_better"]
                }
                if metric_name in _metric_list:
                    self._metric_fn_list[metric_name] = _metric_list[metric_name]["fn"]
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    eval_logger.warning(
                        f"Metric {metric_name} not found, "
                        "Searching from https://huggingface.co/evaluate-metric"
                    )
                    try:
                        metric_object = evaluate.load(metric_name)
                        self._metric_fn_list[metric_name] = metric_object
                        self._metric_fn_kwargs[metric_name] = kwargs

                    except Exception:
                        raise Warning(
                            f"{metric_name} not found in the evaluate library! "
                            "Please check https://huggingface.co/evaluate-metric"
                        )

                if "aggregation" in metric_config:
                    self._aggregation_list[metric_name] = metric_config["aggregation"]
                else:
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but aggregation is not. "
                        f"Using default aggregation for {metric_name}"
                    )
                    self._aggregation_list[metric_name] = _metric_list[metric_name][
                        "aggregation"
                    ]

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but higher_is_better is not. "
                        f"Using default higher_is_better for {metric_name}"
                    )
                    self._higher_is_better[metric_name] = _metric_list[metric_name][
                        "higher_is_better"
                    ]

        self.download(self._config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self._config.filter_list is not None:
            self._filters = []
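            # Expected filter_list shape (illustrative; the filter name and
            # kwargs below are examples, not the only options):
            #   filter_list:
            #     - name: "get-answer"
            #       filter:
            #         - function: "regex"
            #           regex_pattern: "answer is (.*)"
            #         - function: "take_first"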
            for filter_config in self._config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])

                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [
                build_filter_ensemble("take_first", [["take_first", None]])
            ]

        if self._config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self._config.use_prompt}")
            self.prompt = get_prompt(
                self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.Sampler(
                list(self.fewshot_docs()), self, rnd=random.Random()
            )  # TODO: pass the correct docs in here

    def download(self, dataset_kwargs=None):

        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
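
    # `dataset_kwargs` (illustrative; keys depend on the dataset) is forwarded
    # verbatim to datasets.load_dataset, e.g.
    #   {"data_files": {"test": "test.jsonl"}} or {"revision": "main"}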

    def has_training_docs(self):
        if self._config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self):
        if self._config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self):
        if self._config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self):
        if self._config.training_split is not None:
            return self.dataset[self._config.training_split]

    def validation_docs(self):
        if self._config.validation_split is not None:
            return self.dataset[self._config.validation_split]

    def test_docs(self):
        if self._config.test_split is not None:
            return self.dataset[self._config.test_split]

    def fewshot_docs(self):
        if (self._config.num_fewshot > 0) and (self._config.fewshot_split is None):
            eval_logger.warning(
                "num_fewshot > 0 but fewshot_split is None. "
                "using preconfigured rule."
            )
            return super().fewshot_docs()

        elif self._config.fewshot_split is not None:
            return self.dataset[self._config.fewshot_split]

    def should_decontaminate(self):
        return self._config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self._config.should_decontaminate:
            return utils.apply_template(self._config.doc_to_decontamination_query, doc)

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):

        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self._config.doc_to_text

        if isinstance(doc_to_text, str):
            return utils.apply_template(doc_to_text, doc)
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # prompt objects (e.g. from promptsource) expose an `apply` method
        elif hasattr(doc_to_text, "apply"):
            return doc_to_text.apply(doc)[0]
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc):

        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self._config.doc_to_target

        if isinstance(doc_to_target, str):
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def construct_requests(self, doc, ctx, **kwargs):

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            # we pass the user-defined answer_choices var (in aliases) and translate the result to a Python list.
            # TODO: any cleaner way to do this?
            choices = ast.literal_eval(
                utils.apply_template(
                    self._config.template_aliases + "{{answer_choices}}", doc
                )
            )
            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=(ctx, " {}".format(choice)),
                    idx=i,
                    **kwargs,
                )
                for i, choice in enumerate(choices)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
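                # e.g. (illustrative) with conditional lls = [-2.0, -2.5] and
                # unconditional lls = [-1.0, -3.0], the mutual-info scores are
                # [-1.0, 0.5], so choice 1 wins even though the raw
                # loglikelihoods favor choice 0.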
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "greedy_until":
            arguments = (ctx, self._config.delimiter)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):

        result_dict = {}
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            result_dict = {"perplexity": ll, "acc": int(is_greedy)}
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                "word_perplexity": (loglikelihood, _words),
                "byte_perplexity": (loglikelihood, _bytes),
                "bits_per_byte": (loglikelihood, _bytes),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls = [
                res[0] for res in results
            ]  # only retain loglikelihoods, discard is_greedy
            gold = int(self.doc_to_target(doc))
            pred = np.argmax(lls)
            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = ast.literal_eval(
                utils.apply_template(
                    self._config.template_aliases + "{{answer_choices}}", doc
                )
            )
            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            acc = 1.0 if np.argmax(lls) == gold else 0.0
            completion_len = np.array([float(len(i)) for i in choices])
            acc_norm = 1.0 if np.argmax(lls / completion_len) == gold else 0.0

            result_dict = {
                "acc": acc,
                "f1": (pred, gold),
                "acc_norm": acc_norm,
            }

            # TODO: set which normalization metrics should be reported, and calculate them

            if "exact_match" in self._metric_list.keys():
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                is_greedy = [
                    res[1] for res in results
                ]  # take only the `is_greedy` results
                is_greedy = is_greedy[gold]  # take value for the gold answer
                result_dict["exact_match"] = int(is_greedy)

            if "acc_mutual_info" in self._metric_list.keys():
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "greedy_until":

            if self._config.gold_alias is not None:
                gold = doc[self._config.gold_alias]
            else:
                gold = self.doc_to_target(doc)

            for key, result in zip(self._metric_fn_list.keys(), results):
                _dict = self._metric_fn_list[key].compute(
                    references=[gold], predictions=[result], **self._metric_fn_kwargs[key]
                )

                result_dict = {**result_dict, **_dict}
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'greedy_until', or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):

    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc):
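        # e.g. (illustrative) doc = {"choices": ["yes", "no"], "gold": 0} -> " yes"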
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc, ctx, **kwargs):
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc, results):
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):

    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self):
        return False

    def fewshot_examples(self, k, rnd):
        assert k == 0
        return []

    def fewshot_context(self, doc, num_fewshot, rnd=None):
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`."

        return ""

    def higher_is_better(self):
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc):
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc, ctx, **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self):
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }
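
    # Worked example (illustrative, assuming `weighted_perplexity` aggregates
    # per-document (loglikelihood, num_units) pairs as exp(-sum(ll) / sum(units)),
    # as defined in lm_eval.api.metrics): two docs with loglikelihoods
    # [-10.0, -20.0] over [4, 6] words give word_perplexity = exp(30 / 10) ≈ 20.09.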

    @classmethod
    def count_bytes(cls, doc):
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))