import abc
from dataclasses import dataclass, field, asdict

import re
import ast
import yaml
import evaluate
import random
import itertools
import functools
from tqdm import tqdm

import datasets
import numpy as np

from typing import Union, List, Any, Tuple, Literal
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble

from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble
from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
    bits_per_byte,
    metric_max_over_ground_truths,
)
from lm_eval.api.registry import (
    get_metric,
    get_aggregation,
    get_metric_aggregation,
    is_higher_better,
    DEFAULT_METRIC_REGISTRY,
    OUTPUT_TYPE_REGISTRY,
    AGGREGATION_REGISTRY,
)


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    task_alias: str = None
    group: Union[str, list] = None
    group_alias: Union[str, list] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert if this is the same split as the one being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str, dict, list] = None
    process_results: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: dict = None
    # runtime configuration options
    num_fewshot: int = 0
    # scoring options
    metric_list: list = None
    output_type: str = "generate_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.dataset_path and ("." in self.dataset_path):
            import inspect
            from importlib import import_module

            self.dataset_path = inspect.getfile(import_module(self.dataset_path))

        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": None
                    if self.fewshot_delimiter is None
                    else [self.fewshot_delimiter],
                    "do_sample": False,
                }

        # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?
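
        # For example (a sketch): a task with output_type="generate_until" and no
        # explicit generation_kwargs ends up with
        #     {"until": ["\n\n"], "do_sample": False}
        # i.e. greedy decoding that stops at the default fewshot delimiter.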

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self):
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                # TODO: this should handle Promptsource template objects as a separate case?
                cfg_dict[k] = str(v)
        return cfg_dict
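
# Illustrative usage (a sketch; the exact field values below are hypothetical,
# not a task shipped with the harness):
#
#     cfg = TaskConfig(
#         task="demo_boolq",
#         dataset_path="super_glue",
#         dataset_name="boolq",
#         output_type="multiple_choice",
#         doc_to_text="{{passage}}\nQuestion: {{question}}?\nAnswer:",
#         doc_to_target="label",
#         doc_to_choice=["no", "yes"],
#     )
#     cfg.to_dict()  # drops all None-valued fields; callables are stringified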


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., question, answer)
    """

    VERSION = None
    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None
    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = (
            TaskConfig(
                {
                    **config,
                }
            )
            if config
            else TaskConfig()
        )
        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
    def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
244
245
246
247
248
249
250
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )
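
    # Illustrative override behaviour (a sketch): passing
    # download_mode=datasets.DownloadMode.FORCE_REDOWNLOAD re-fetches the data,
    # while the default REUSE_DATASET_IF_EXISTS reads from the HF datasets cache
    # (~/.cache/huggingface/datasets, or $HF_DATASETS_CACHE if set).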
    @property
    def config(self):
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc
    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        eval_logger.info(f"Building contexts for task on rank {rank}...")

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
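
    # Illustrative (not part of the original API): for doc = "né",
    # count_bytes(doc) == 3 (é is two bytes in UTF-8) while count_words(doc) == 1,
    # which is why byte-level perplexity metrics pair a loglikelihood with a byte
    # count rather than a character count.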

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        provide_description=None,
        rnd=random.Random(1234),
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param provide_description: bool
            Not implemented, and this option is deprecated and will be removed in a future version in favor of a different description providing method
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`"
        assert not provide_description, (
            "The `provide_description` arg will be removed in future versions. To prepend "
            "a custom description to the context, supply the corresponding string via the "
            "`description` arg."
        )
        if provide_description is not None:
            # nudge people to not specify it at all
            print(
                "WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict"
            )

        description = description + "\n\n" if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
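
    # Shape of the returned context, sketched for num_fewshot=2 (example docs are
    # hypothetical):
    #
    #     "<description>\n\n"
    #     "<text(ex1)><target(ex1)>\n\n<text(ex2)><target(ex2)>\n\n"
    #     "<text(eval_doc)>"
    #
    # i.e. description, then fewshot examples joined and terminated by "\n\n",
    # then the unanswered prompt for the document being evaluated.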

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances, None)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns a dictionary representing the task's config.

        :returns: dict
            The task's configuration, as a printable dict.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if self.config.output_type is not None:
            assert self.config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if type(agg_name) == str:
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self._config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self._config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.get_sampler(
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )(list(self.fewshot_docs()), self, rnd=random.Random(1234))

        if self.has_test_docs():
            self.task_docs = self.test_docs()
        elif self.has_validation_docs():
            self.task_docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if type(test_choice) is not list:
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if type(test_text) is int:
                self.multiple_input = num_choice
        else:
            test_choice = None

        if type(test_target) is list:
            self.multiple_target = len(test_target)
        else:
            if (type(test_target) is int) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = True if choice[0].isspace() else False
                delimiter_has_whitespace = (
                    True
                    if self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                    else False
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.warning(
                        f'Both target_delimiter and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.warning(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs=None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        if self.config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self) -> bool:
        if self.config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self) -> bool:
        if self.config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            return self.dataset[self.config.fewshot_split]
        else:
            if self.config.num_fewshot > 0:
                eval_logger.warning(
                    f"Task '{self.config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self.config.description
        else:
            labeled_examples = self.config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)
        if type(example) == str:
            return labeled_examples + example
        elif type(example) == list:
            return [labeled_examples + ex for ex in example]
        elif type(example) == int:
            if self.config.doc_to_choice is not None:
                choices = self.doc_to_choice(doc)
                return labeled_examples + choices[example]
            else:
                return labeled_examples + str(example)
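
    # Sketch (hypothetical doc): when doc_to_text returns an int (multiple-input
    # tasks), the int indexes into doc_to_choice(doc), so e.g. 1 with choices
    # ["premise A", "premise B"] yields labeled_examples + "premise B"; a str is
    # appended directly, and a list yields one context per element.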

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances, self.task_docs)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query in self.features:
                return doc[self.config.doc_to_decontamination_query]
            else:
                return ast.literal_eval(
                    utils.apply_template(self.config.doc_to_decontamination_query, doc)
                )

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if type(doc_to_text) == int:
            return doc_to_text
        elif type(doc_to_text) == str:
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: dict) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if type(doc_to_target) == int:
            return doc_to_target
        elif type(doc_to_target) == str:
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif type(doc_to_target) == list:
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if type(doc_to_choice) == str:
            return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif type(doc_to_choice) == list:
            return doc_to_choice
        elif type(doc_to_choice) == dict:
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError
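
    # Sketch of the accepted doc_to_choice forms (values are hypothetical):
    #     ["no", "yes"]              -> list, used as-is
    #     "{{choices}}"              -> template string, rendered then literal_eval'd
    #     {"0": "no", "1": "yes"}    -> dict, values taken in order
    #     !function utils.choices    -> callable (via YAML), called with the doc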

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [(ctx, f"{target_delimiter}{cont}") for ctx in choices]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, self.config.generation_kwargs)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )
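
    # Sketch (hypothetical doc): with ctx "Q: 2+2?\nA:", target_delimiter " " and
    # choices ["3", "4"], the multiple_choice branch yields loglikelihood requests
    # with arguments [("Q: 2+2?\nA:", " 3"), ("Q: 2+2?\nA:", " 4")]; if
    # acc_mutual_info is requested, unconditional pairs ("", "3"), ("", "4") are
    # appended as well.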
1023
1024

    def process_results(self, doc, results):
1025
1026
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)
lintangsutawika's avatar
lintangsutawika committed
1027

1028
        result_dict = {}
1029
        use_metric = list(self._metric_fn_list.keys())
1030
1031
1032
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
1033
1034
1035
1036
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
1037
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
haileyschoelkopf's avatar
haileyschoelkopf committed
1038
            (loglikelihood,) = results
1039
1040
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
haileyschoelkopf's avatar
haileyschoelkopf committed
1041
            return {
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
haileyschoelkopf's avatar
haileyschoelkopf committed
1057
            }
1058
        elif self.OUTPUT_TYPE == "multiple_choice":
1059
            lls, is_greedy = zip(*results)
lintangsutawika's avatar
lintangsutawika committed
1060

1061
            # retrieve choices in List[str] form, to compute choice lengths, etc.
1062
            choices = self.doc_to_choice(doc)
1063
1064
            completion_len = np.array([float(len(i)) for i in choices])

1065
1066
            if (
                2 * len(choices) == len(lls)
1067
                and "acc_mutual_info" in self._metric_fn_list.keys()
1068
1069
1070
1071
1072
1073
1074
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]
1075

1076
1077
            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)
lintangsutawika's avatar
lintangsutawika committed
1078

1079
1080
            if self.multiple_input:
                gold = self.doc_to_text(doc)
1081
            else:
1082
                gold = self.doc_to_target(doc)
1083
1084
1085

            gold_index_error = False
            if type(gold) is list:
Lintang Sutawika's avatar
Lintang Sutawika committed
1086
1087
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
1088
1089
1090
                    gold_index_error = True
            else:
                if type(gold) is int:
Lintang Sutawika's avatar
Lintang Sutawika committed
1091
                    gold = gold if gold < len(choices) else -100
1092
                elif type(gold) is str:
Lintang Sutawika's avatar
Lintang Sutawika committed
1093
                    gold = choices.index(gold) if gold in choices else -100
lintangsutawika's avatar
lintangsutawika committed
1094

Lintang Sutawika's avatar
Lintang Sutawika committed
1095
                if gold == -100:
1096
1097
1098
1099
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
lintangsutawika's avatar
lintangsutawika committed
1100
                    f"Label index was not in within range of available choices,"
1101
1102
                    f"Sample:\n\n{doc}\n\n"
                )
lintangsutawika's avatar
lintangsutawika committed
1103

1104
            if self.multiple_target:
lintangsutawika's avatar
lintangsutawika committed
1105
1106
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
Lintang Sutawika's avatar
Lintang Sutawika committed
1107
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
lintangsutawika's avatar
lintangsutawika committed
1108
1109
1110
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
1111
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
Lintang Sutawika's avatar
Lintang Sutawika committed
1112
                exact_match = int(is_greedy[gold]) if gold != -100 else 0
1113
1114

            result_dict = {
1115
                **({"acc": acc} if "acc" in use_metric else {}),
1116
1117
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
1118
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
1119
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
1120
1121
            }

1122
            if "acc_mutual_info" in use_metric:
lintangsutawika's avatar
lintangsutawika committed
1123
1124
1125
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
1126
1127
1128
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

1129
        elif self.OUTPUT_TYPE == "generate_until":
1130
            gold = self.doc_to_target(doc)
Chris's avatar
Chris committed
1131
            result = results[0]
1132
            if self.config.doc_to_choice is not None:
lintangsutawika's avatar
lintangsutawika committed
1133
                # If you set doc_to_choice,
lintangsutawika's avatar
lintangsutawika committed
1134
                # it assumes that doc_to_target returns a number.
1135
1136
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
1137
1138
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
baberabb's avatar
baberabb committed
1139
                gold = list(gold)
Chris's avatar
Chris committed
1140
1141
1142
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)
1143

lintangsutawika's avatar
lintangsutawika committed
1144
            for metric in self._metric_fn_list.keys():
haileyschoelkopf's avatar
haileyschoelkopf committed
1145
1146
1147
1148
1149
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multipLe_target, non zero-or-1 metrics
                    scores = []
haileyschoelkopf's avatar
haileyschoelkopf committed
1150
1151
1152
1153
                    if not isinstance(gold, list):
                        # some multiple_target datasets have docs with only a single string answer
                        gold = [gold]
                    for gold_option in gold:
                        try:
                            result_score = self._metric_fn_list[metric](
                                references=[gold_option],
                                predictions=[result],
                                **self._metric_fn_kwargs[metric],
                            )
                        except TypeError:
                            # TODO: this is hacky and I don't want to do it
                            result_score = self._metric_fn_list[metric](
                                [gold_option, result]
                            )
                        if isinstance(result_score, dict):
                            # HF Evaluate metrics return a dict keyed by metric name; unwrap it
                            result_score = result_score[metric]
                        scores.append(result_score)
                    if any(scores):
                        result_score = 1.0
                    else:
                        result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:
                        # needed for now in order to use a different interface
                        # between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # HF Evaluate metrics return a dict keyed by metric name; unwrap it
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'."
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):
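    """A task format in which each doc supplies a list of `choices` and a
    `gold` index, and every choice is scored by loglikelihood in context."""
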
    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
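        # prepend a space so the target joins cleanly onto the context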
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: List[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # retain only the loglikelihoods; discard is_greedy. TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
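        # acc_norm: accuracy after normalizing each loglikelihood by the
        # character length of its choice string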
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
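    """A task that scores the rolling loglikelihood of each full document,
    reporting word- and byte-level perplexity and bits per byte."""
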
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        assert k == 0, "The number of fewshot examples must be 0 for perplexity tasks."
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
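        # the full document text is the target whose loglikelihood is scored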
        return doc

    def construct_requests(self, doc: dict, ctx: Union[str, None], **kwargs):
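        # loglikelihood_rolling scores the whole document as the target,
        # so the context must be empty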
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
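        # each value is a (loglikelihood, weight) pair; the aggregation
        # functions reduce these pairs to corpus-level scores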
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))