"examples/nlp/vscode:/vscode.git/clone" did not exist on "6529e7c9980772faff1862f2a8f842c5c04a1ed2"
task.py 40.2 KB
Newer Older
import abc
from dataclasses import dataclass, field, asdict

import re
import ast
import yaml

import evaluate
import random
import itertools
import functools
from tqdm import tqdm

import datasets
import numpy as np

from typing import Union
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble
from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble
from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
    bits_per_byte,
    metric_max_over_ground_truths,
)
from lm_eval.api.registry import (
    get_metric,
    get_aggregation,
    get_default_aggregation,
    is_higher_better,
    DEFAULT_METRIC_REGISTRY,
    OUTPUT_TYPE_REGISTRY,
    AGGREGATION_REGISTRY,
)


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "greedy_until",
]
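
# Each output type corresponds to one shape of request sent to the LM
# (illustrative summary, not an exhaustive spec):
#   loglikelihood          -> score one (context, continuation) pair
#   loglikelihood_rolling  -> score a full document (e.g. for perplexity)
#   multiple_choice        -> one loglikelihood request per answer choice
#   greedy_until           -> (context, generation_kwargs) free-form generation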


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    group: Union[str, list] = None

    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert it is not the same split we evaluate on (?)

    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str, dict, list] = None
    gold_alias: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"

    # runtime configuration options
    num_fewshot: int = 0

    # scoring options
    metric_list: list = None
    output_type: str = "greedy_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self):
        if self.generation_kwargs is not None:
            if self.output_type != "greedy_until":
                eval_logger.warning(
                    "passed `generation_kwargs`, but not using `output_type: greedy_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "greedy_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": None
                    if self.fewshot_delimiter is None
                    else [self.fewshot_delimiter],
                    "do_sample": False,
                    "temperature": 0.0,
                }
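
        # Illustrative (hypothetical) YAML generation_kwargs that pass through
        # the normalization above:
        #
        #     generation_kwargs:
        #       until: ["\n\n"]
        #       temperature: 0      # coerced to float(0.0)
        #       do_sample: false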

        # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?

    def __getitem__(self, item):
        return getattr(self, item)

    def to_dict(self):
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.

        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                # TODO: this should handle Promptsource template objects as a separate case?
                cfg_dict[k] = str(v)
        return cfg_dict
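
# Example (illustrative only; all field values below are hypothetical): building
# a TaskConfig from a plain dict, as would be parsed from a task YAML, then
# dumping it back with `to_dict()`; fields left as None are dropped.
#
#     cfg = TaskConfig(
#         task="demo_boolq",
#         dataset_path="super_glue",
#         dataset_name="boolq",
#         output_type="multiple_choice",
#         training_split="train",
#         validation_split="validation",
#         doc_to_text="{{passage}}\nQuestion: {{question}}?\nAnswer:",
#         doc_to_target="label",
#         doc_to_choice=["no", "yes"],
#         metric_list=[{"metric": "acc"}],
#     )
#     print(cfg.to_dict())  # only non-None fields appear
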

class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., question, answer)
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ):
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig(**config) if config else TaskConfig()

        if not hasattr(self, "_filters"):
            self._filters = []
            for name, components in self._config.get(
                "filters", [["none", [["take_first", None]]]]
            ):
                filter_pipeline = build_filter_ensemble(name, components)
                self._filters.append(filter_pipeline)

        self.sampler = samplers.Sampler(
            list(self.fewshot_docs()), self, rnd=random.Random(1234)
        )

    def download(self, data_dir=None, cache_dir=None, download_mode=None):
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None):
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        eval_logger.info(
            f"Building contexts for task '{self._config.task}' on rank {rank}..."
        )

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                self._config.num_fewshot,
            )

            # TODO: we should override self._config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self._config["task"], doc_id, self._config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_all_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

haileyschoelkopf's avatar
haileyschoelkopf committed
427
428
429
430
431
432
433
434
435
436
    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self._config.description
        else:
            labeled_examples = self._config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)
        if type(example) == str:
            return labeled_examples + example
        elif type(example) == list:
            return [labeled_examples + ex for ex in example]
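
    # Illustrative sketch (hypothetical doc and config): with description
    # "Answer the question.\n\n", num_fewshot=1, and a doc
    # {"question": "2+2=?", "answer": "4"}, fewshot_context() returns roughly
    #
    #     "Answer the question.\n\n" + "Q: 1+1=?\nA: 2\n\n" + "Q: 2+2=?\nA:"
    #
    # i.e. description + sampled fewshot examples + the unanswered query for `doc`.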

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self):
        """Returns a dictionary representing the task's config.

        :returns: dict
            A dictionary of the task's full configuration.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self._config.to_dict()


class ConfigurableTask(Task):

    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ):
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self._config is None:
            if config is None:
                raise ValueError(
                    "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
                )
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self._config.output_type is not None:
            assert self._config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self._config.output_type

        if self._config.dataset_path is not None:
            self.DATASET_PATH = self._config.dataset_path

        if self._config.dataset_name is not None:
            self.DATASET_NAME = self._config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        _metric_list = DEFAULT_METRIC_REGISTRY[self._config.output_type]
        if self._config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._aggregation_list[metric_name] = get_default_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self._config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key not in ["metric", "aggregation", "higher_is_better"]
                }
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if type(agg_name) == str:
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_default_aggregation(metric_name)
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self._config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self._config.filter_list is not None:
            self._filters = []
            for filter_config in self._config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        if self._config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self._config.use_prompt}")
            self.prompt = get_prompt(
                self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.Sampler(
                list(self.fewshot_docs()), self, rnd=random.Random(1234)
            )

        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        # Test One Doc
        self.features = list(docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self._config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if type(test_choice) is not list:
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if type(test_text) is int:
                self.multiple_input = num_choice

        if type(test_target) is list:
            self.multiple_target = len(test_target)

    def download(self, dataset_kwargs=None):

        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
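
        # Example (hypothetical values): `dataset_kwargs` comes straight from
        # the task YAML and is forwarded to `datasets.load_dataset`, so standard
        # loader arguments apply, e.g.
        #
        #     dataset_kwargs = {"data_files": {"test": "my_data.jsonl"}, "revision": "main"}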

    def has_training_docs(self):
        return self._config.training_split is not None

    def has_validation_docs(self):
        return self._config.validation_split is not None

    def has_test_docs(self):
        return self._config.test_split is not None

    def training_docs(self):
        if self.has_training_docs():
            if self._config.process_docs:
                return self._config.process_docs(
                    self.dataset[self._config.training_split]
                )
            return self.dataset[self._config.training_split]

    def validation_docs(self):
        if self.has_validation_docs():
            if self._config.process_docs:
                return self._config.process_docs(
                    self.dataset[self._config.validation_split]
                )
            return self.dataset[self._config.validation_split]

    def test_docs(self):
        if self.has_test_docs():
            if self._config.process_docs:
                return self._config.process_docs(self.dataset[self._config.test_split])
            return self.dataset[self._config.test_split]

    def fewshot_docs(self):
        if self._config.fewshot_split is not None:
            return self.dataset[self._config.fewshot_split]
        else:
            if self._config.num_fewshot > 0:
                eval_logger.warning(
                    f"Task '{self._config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    def should_decontaminate(self):
        return self._config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self._config.should_decontaminate:
            if self._config.doc_to_decontamination_query in self.features:
                return doc[self._config.doc_to_decontamination_query]
            else:
                return ast.literal_eval(
                    utils.apply_template(self._config.doc_to_decontamination_query, doc)
                )

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self._config.doc_to_text

        if type(doc_to_text) == int:
            return doc_to_text
        elif type(doc_to_text) == str:
            if doc_to_text in self.features:
                # if self._config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit():
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            return doc_to_text.apply(doc)[0]
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc):
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self._config.doc_to_target

        if type(doc_to_target) == int:
            return doc_to_target
        elif type(doc_to_target) == str:
            if doc_to_target in self.features:
                # if self._config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit():
                    return ast.literal_eval(target_string)
                else:
                    return target_string
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc):

        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self._config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self._config.doc_to_choice

        if type(doc_to_choice) == str:
            return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif type(doc_to_choice) == list:
            return doc_to_choice
        elif type(doc_to_choice) == dict:
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")
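
    # Illustrative summary (hypothetical values) of the accepted `doc_to_choice`
    # forms and what this method returns for each:
    #
    #     "{{choices}}"               -> ast.literal_eval of the rendered template, e.g. ["no", "yes"]
    #     ["no", "yes"]               -> returned as-is
    #     {"0": "no", "1": "yes"}     -> the dict's values, ["no", "yes"]
    #     lambda doc: doc["options"]  -> called on the doc
    #     a Promptsource template     -> its get_answer_choices_list(doc)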

    def gold_alias(self, doc):
        # returns a version of the gold target answer to a document,
        # which should be passed into metric for scoring as the ground truth.

        # in multiple_choice tasks, this should be castable to an int corresponding to the index
        # within the answer choices, while doc_to_target is the string version of {{answer_choices[gold]}}.
        if self._config.gold_alias is not None:
            doc_to_target = self._config.gold_alias
        else:
            return self.doc_to_target(doc)

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError

    def construct_requests(self, doc, ctx, **kwargs):

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [(ctx, " {}".format(cont)) for ctx in choices]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, " {}".format(cont)) for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "greedy_until":
            arguments = (ctx, self._config.generation_kwargs)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):

        # if callable(self._config.process_results):
        #     return self._config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())

        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
916
917

            lls, is_greedy = zip(*results)
lintangsutawika's avatar
lintangsutawika committed
918

919
            # retrieve choices in List[str] form, to compute choice lengths, etc.
920
            choices = self.doc_to_choice(doc)
921
922
            completion_len = np.array([float(len(i)) for i in choices])

923
924
            if (
                2 * len(choices) == len(lls)
925
                and "acc_mutual_info" in self._metric_fn_list.keys()
926
927
928
929
930
931
932
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]
933

934
935
            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)
lintangsutawika's avatar
lintangsutawika committed
936
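
            # Worked illustration (hypothetical numbers): with choices
            # ["cat", "catamaran"] and lls = (-4.0, -6.0),
            #   pred      = argmax([-4.0, -6.0])           = 0
            #   pred_norm = argmax([-4.0 / 3, -6.0 / 9])   = 1
            # i.e. acc_norm divides by character length so longer choices are
            # not penalized merely for containing more characters.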

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)
                if type(gold) is str:
                    gold = choices.index(gold)

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
            }

            if "exact_match" in self._metric_fn_list.keys():
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                is_greedy = is_greedy[gold]  # take value for the gold answer
                result_dict["exact_match"] = int(is_greedy)

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "greedy_until":
            gold = self.doc_to_target(doc)

            for key, result in zip(self._metric_fn_list.keys(), results):
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    for gold_option in gold:
                        res = self._metric_fn_list[key](
                            references=[gold_option],
                            predictions=[result],
                            **self._metric_fn_kwargs[key],
                        )
                        if isinstance(res, dict):
                            # TODO: this handles the case where HF evaluate returns a dict.
                            res = res[key]
                        scores.append(res)
                    if any(scores):
                        result = 1.0
                    else:
                        result = 0.0
                else:
                    result = self._metric_fn_list[key](
                        references=[gold],
                        predictions=[result],
                        **self._metric_fn_kwargs[key],
                    )

                if isinstance(result, dict):
                    result_dict.update(result)
                else:
                    result_dict[key] = result
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'greedy_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):

    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc):
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc, ctx, **kwargs):
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc, results):
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):

    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self):
        return False

    def fewshot_examples(self, k, rnd):
        assert k == 0
        return []

    def fewshot_context(self, doc, num_fewshot):
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."

        return ""

    def higher_is_better(self):
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc):
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc, ctx, **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self):
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc):
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
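

# Worked sketch of the perplexity aggregation (hypothetical numbers; assumes
# `weighted_perplexity(items)` computes exp(-sum(ll) / sum(weight)) over the
# (loglikelihood, weight) pairs returned by process_results, and `bits_per_byte`
# computes -sum(ll) / (sum(weight) * ln 2), as in lm_eval.api.metrics):
#
#     items = [(-20.0, 10), (-10.0, 10)]     # (doc loglikelihood, doc word count)
#     word_perplexity = exp(30.0 / 20)       # = e**1.5 ~= 4.48
#
# i.e. the corpus-level score weights each document by its length rather than
# averaging per-document perplexities.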