import abc
from dataclasses import dataclass, field, asdict

import re
import ast
import yaml
import evaluate
import random
import itertools
import functools

import datasets
import numpy as np

from typing import Union
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble
from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble
from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
    bits_per_byte,
)
from lm_eval.api.registry import (
    get_metric,
    get_aggregation,
    get_default_aggregation,
    is_higher_better,
    DEFAULT_METRIC_REGISTRY,
    OUTPUT_TYPE_REGISTRY,
    AGGREGATION_REGISTRY,
)


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "greedy_until",
    "winograd_schema",
]


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    group: Union[str, list] = None

    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert if this is same split as one evaling (?)

    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    template_aliases: Union[str, list] = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str] = None
    gold_alias: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"

    # runtime configuration options
    num_fewshot: int = 0

    # scoring options
    metric_list: list = None
    output_type: str = "greedy_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self):
        # allow user-specified aliases so that users can
        # force prompt-compatibility for some prompt regardless of
        # field names in prompt
        if self.template_aliases is not None:
            if type(self.doc_to_text) == str:
                self.doc_to_text = self.template_aliases + self.doc_to_text

            if type(self.doc_to_target) == str:
                self.doc_to_target = self.template_aliases + self.doc_to_target

            if type(self.gold_alias) == str:
                self.gold_alias = self.template_aliases + self.gold_alias

        if self.generation_kwargs is not None:
            if self.output_type != "greedy_until":
                eval_logger.warning(
                    "passed `generation_kwargs`, but not using a generation request type!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "greedy_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": None
                    if self.fewshot_delimiter is None
                    else [self.fewshot_delimiter],
                    "do_sample": False,
                    "temperature": 0.0,
                }

        # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?

    def __getitem__(self, item):
        return getattr(self, item)

    def to_dict(self):
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                # TODO: this should handle Promptsource template objects as a separate case?
                cfg_dict[k] = str(v)
        return cfg_dict
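
    # A minimal usage sketch (hypothetical values, not part of this module):
    #
    #   >>> cfg = TaskConfig(task="demo", output_type="greedy_until")
    #   >>> cfg.generation_kwargs  # filled in by __post_init__
    #   {'until': ['\n\n'], 'do_sample': False, 'temperature': 0.0}
    #   >>> cfg.to_dict()  # None-valued fields are dropped
    #   {'task': 'demo', 'output_type': 'greedy_until', 'description': '', ...}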


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ):
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig(**config) if config else TaskConfig()

        if not hasattr(self, "_filters"):
            self._filters = []
            for name, components in self._config.get(
                "filters", [["none", [["take_first", None]]]]
            ):
                filter_pipeline = build_filter_ensemble(name, components)
                self._filters.append(filter_pipeline)

        self.sampler = samplers.Sampler(
            list(self.fewshot_docs()), self, rnd=random.Random()
        )  # TODO: pass the correct docs in here

    def download(self, data_dir=None, cache_dir=None, download_mode=None):
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def create_choices(self, doc):
        if self._config.doc_to_choice is None:
            return ast.literal_eval(
                utils.apply_template(
                    self._config.template_aliases + "{{answer_choices}}", doc
                )
            )
        elif type(self._config.doc_to_choice) == str:
            return utils.apply_template(self._config.doc_to_choice, doc)
        else:
            return self._config.doc_to_choice(doc)
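
    # Illustrative sketch of the template path (the alias string is a made-up
    # example, not shipped with any task): with
    #   template_aliases = "{% set answer_choices = ['yes', 'no'] %}"
    # apply_template renders "{{answer_choices}}" to the string "['yes', 'no']",
    # which ast.literal_eval then converts into the Python list ['yes', 'no'].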

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None):
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc, self._config.num_fewshot, rnd=random.Random()
            )

            # TODO: we should override self._config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self._config["task"], doc_id, self._config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"
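
    # Expected call pattern (sketch, assuming utils.create_iterator shards
    # `enumerate(docs)` across ranks and stops after `limit` docs):
    #
    #   >>> task.build_all_requests(limit=100, rank=0, world_size=2)
    #   >>> len(task.instances)  # Instances built for this rank's shard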

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :returns: str
            The fewshot context.
        """
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`"

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self._config.description
        else:
            labeled_examples = self._config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)
        return labeled_examples + example
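
    # Shape of the returned context (illustrative):
    #
    #   <description><example 1>\n\n<example 2>\n\n...<doc text>
    #
    # where the example separator is assumed to come from `fewshot_delimiter`
    # via the sampler; with num_fewshot=0 only the description is prepended.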

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self):
        """Returns a dictionary representing the task's config.

        :returns: dict
            A printable dictionary of the task's current configuration.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self._config.to_dict()


class ConfigurableTask(Task):

    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ):
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self._config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self._config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if self._config.output_type is not None:
            assert self._config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self._config.output_type

        if self._config.dataset_path is not None:
            self.DATASET_PATH = self._config.dataset_path

        if self._config.dataset_name is not None:
            self.DATASET_NAME = self._config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        _metric_list = DEFAULT_METRIC_REGISTRY[self._config.output_type]
        if self._config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._aggregation_list[metric_name] = get_default_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self._config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key not in ["metric", "aggregation", "higher_is_better"]
                }
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if type(agg_name) == str:
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_default_aggregation(metric_name)
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self._config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self._config.filter_list is not None:
            self._filters = []
            for filter_config in self._config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self._config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self._config.use_prompt}")
            self.prompt = get_prompt(
                self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.Sampler(
                list(self.fewshot_docs()), self, rnd=random.Random()
            )

        if self._config.template_aliases is not None:
            for key, alias in self._config.template_aliases:
                self.dataset = self.dataset.rename_column(key, alias)

    def __post_init__(self):

        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        # Test One Doc
        test_doc = docs[0]
        test_text = self.doc_to_text(test_doc)

        if OUTPUT_TYPE == "multiple_choice":
            if type(test_text) is list:
                self.multiple_input = True
            elif type(test_text) is str:
                self.multiple_input = False
                test_choice = self.doc_choice(test_doc)
        
        test_target = self.doc_to_target(test_doc)

    def download(self, dataset_kwargs=None):
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
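
    # `dataset_kwargs` is forwarded verbatim to `datasets.load_dataset`, so a
    # YAML task can pass arbitrary loader options, e.g. (hypothetical config):
    #
    #   dataset_path: json
    #   dataset_kwargs:
    #     data_files: path/to/data.jsonl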

    def has_training_docs(self):
        return self._config.training_split is not None

    def has_validation_docs(self):
        return self._config.validation_split is not None

    def has_test_docs(self):
        return self._config.test_split is not None

    def training_docs(self):
        if self._config.training_split is not None:
            return self.dataset[self._config.training_split]

    def validation_docs(self):
        if self._config.validation_split is not None:
            return self.dataset[self._config.validation_split]

    def test_docs(self):
        if self._config.test_split is not None:
            return self.dataset[self._config.test_split]

    def fewshot_docs(self):
        if self._config.fewshot_split is not None:
            return self.dataset[self._config.fewshot_split]
        else:
            if self._config.num_fewshot > 0:
                eval_logger.warning(
                    f"Task '{self._config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()
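
    # Resolution order, as implemented above: an explicit `fewshot_split` wins;
    # otherwise Task.fewshot_docs falls back to the training split, then the
    # validation split, then (with a warning) the test split. E.g. a
    # hypothetical YAML snippet:
    #
    #   num_fewshot: 5
    #   fewshot_split: validation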

    def should_decontaminate(self):
        return self._config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self._config.should_decontaminate:
            return utils.apply_template(self._config.doc_to_decontamination_query, doc)

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self._config.doc_to_text

        if type(doc_to_text) == str:
            return utils.apply_template(doc_to_text, doc)
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            return doc_to_text.apply(doc)[0]
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")
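
    # Sketch of the string path (hypothetical template and doc): with
    #   doc_to_text = "Q: {{question}}\nA:" and doc = {"question": "1+1?"},
    # apply_template is assumed to render the Jinja template, yielding
    # "Q: 1+1?\nA:".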

    def doc_to_target(self, doc):
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self._config.doc_to_target

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def gold_alias(self, doc):
        # returns a version of the gold target answer to a document,
        # which should be passed into metric for scoring as the ground truth.

        # in multiple_choice tasks, this should be castable to an int corresponding to the index
        # within the answer choices, while doc_to_target is the string version of {{answer_choices[gold]}}.
        if self._config.gold_alias is not None:
            doc_to_target = self._config.gold_alias
        else:
            return self.doc_to_target(doc)

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError(f"Unexpected gold_alias type: {type(doc_to_target)}")

    def construct_requests(self, doc, ctx, **kwargs):
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            # we pass the user-defined answer_choices var (in aliases) and translate the result to a Python list.
            # TODO: any cleaner way to do this?
            if self.multiple_input:
                choices = self.doc_to_text(doc)
                cont = self.doc_to_target(doc)
                arguments = [(ctx, " {}".format(cont)) for ctx in choices]
            else:
                choices = self.create_choices(doc)
                arguments = [(ctx, " {}".format(cont)) for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "greedy_until":
            arguments = (ctx, self._config.generation_kwargs)

        elif self.OUTPUT_TYPE == "winograd_schema":
            # similar to multiple_choice task type except each request contains
            # multiple differing contexts with the same continuation

            contexts = self.create_choices(doc)
            choice = self.doc_to_target(doc)

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=(context, " {}".format(choice)),
                    idx=i,
                    **kwargs,
                )
                for i, context in enumerate(contexts)
            ]
            return request_list

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):

        # if callable(self._config.process_results):
        #     return self._config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.create_choices(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred_idx = np.argmax(lls)
            pred_idx_norm = np.argmax(lls / completion_len)

            if self._config.gold_alias is not None:
                gold = int(self.gold_alias(doc))
                gold_idx = gold
                pred = pred_idx
                pred_norm = pred_idx_norm
            else:
                gold = self.doc_to_target(doc)
                gold_idx = choices.index(gold)
                pred = choices[pred_idx]
                pred_norm = choices[pred_idx_norm]

            acc = 1.0 if pred == gold else 0.0
            acc_norm = 1.0 if pred_norm == gold else 0.0

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold_idx, pred_idx)} if "f1" in use_metric else {}),
                **({"mcc": (gold_idx, pred_idx)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
            }

            if "exact_match" in self._metric_fn_list.keys():
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                is_greedy = is_greedy[gold_idx]  # take value for the gold answer
                result_dict["exact_match"] = int(is_greedy)

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold_idx else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "winograd_schema":

            lls, is_greedy = zip(*results)
            if self._config.gold_alias is not None:
                gold = int(self.gold_alias(doc))
            else:
                gold = int(self.doc_to_target(doc))

            pred = np.argmax(lls)
            acc = 1.0 if np.argmax(lls) == gold else 0.0

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
            }

        elif self.OUTPUT_TYPE == "greedy_until":

            if self._config.gold_alias is not None:
                gold = self.gold_alias(doc)
            else:
                gold = self.doc_to_target(doc)

            for key, result in zip(self._metric_fn_list.keys(), results):
                _dict = self._metric_fn_list[key](
                    references=[gold],
                    predictions=[result],
                    **self._metric_fn_kwargs[key],
                )

                result_dict = {**result_dict, **_dict}
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'greedy_until', 'multiple_choice' or 'winograd_schema'"
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):

    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc):
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc, ctx, **kwargs):
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc, results):
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "acc_norm": mean,
        }
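
    # A minimal concrete subclass only needs docs carrying "choices" and "gold"
    # fields plus the remaining abstract methods (sketch, not a real task):
    #
    #   class MyMCTask(MultipleChoiceTask):
    #       def has_training_docs(self): return False
    #       def has_validation_docs(self): return False
    #       def has_test_docs(self): return True
    #       def test_docs(self):
    #           return [{"query": "2+2=?", "choices": ["3", "4"], "gold": 1}]
    #       def doc_to_text(self, doc):
    #           return doc["query"]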


class PerplexityTask(Task):

    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self):
        return False

    def fewshot_examples(self, k, rnd):
        assert k == 0
        return []

    def fewshot_context(self, doc, num_fewshot, rnd=None):
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`."

        return ""

    def higher_is_better(self):
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc):
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc, ctx, **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self):
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc):
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
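

# Aggregation sketch for the perplexity metrics above, assuming the imported
# helpers follow the usual definitions: each doc contributes a
# (loglikelihood, weight) pair, and
#
#   weighted_perplexity = exp(-sum(lls) / sum(weights))
#   bits_per_byte       = -sum(lls) / (sum(bytes) * ln 2)
#
# e.g. two docs with lls (-10.0, -20.0) over (40, 60) bytes give
# byte_perplexity = exp(30 / 100) ~= 1.35 and
# bits_per_byte = 30 / (100 * ln 2) ~= 0.43.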