import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from dataclasses import asdict, dataclass
from typing import Any, List, Literal, Tuple, Union

import datasets
import numpy as np

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.metrics import (
    bits_per_byte,
    mean,
    weighted_perplexity,
)
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt

ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    task_alias: str = None
    group: Union[str, list] = None
    group_alias: Union[str, list] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert if this is the same split as the one being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str, dict, list] = None
    process_results: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: dict = None
    # runtime configuration options
    num_fewshot: int = None
    # scoring options
    metric_list: list = None
    output_type: str = "generate_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None
    metadata: Union[
        str, list
    ] = None  # by default, not used in the code. allows users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": None
                    if self.fewshot_delimiter is None
                    else [self.fewshot_delimiter],
                    "do_sample": False,
                }

    # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?
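
    # A minimal usage sketch (hypothetical values, not part of the library API):
    #
    #   cfg = TaskConfig(
    #       task="demo_qa",
    #       dataset_path="json",
    #       test_split="test",
    #       doc_to_text="Q: {{question}}\nA:",
    #       doc_to_target="{{answer}}",
    #       output_type="generate_until",
    #   )
    #
    # __post_init__ then defaults generation_kwargs to greedy decoding with
    # `until` set to the fewshot delimiter ("\n\n").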

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable=False):
        """Dumps the current config as a dictionary object, in a printable format.
        Fields that are `None` will not be printed.
        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                if keep_callable:
                    cfg_dict[k] = v
                else:
                    # TODO: this should handle Promptsource template objects as a separate case?
                    cfg_dict[k] = str(v)
        return cfg_dict


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or a tuple, e.g.
        (question, answer)
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]

    def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self):
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc) -> None:
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert False, f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have validation or test docs!"

        eval_logger.info(f"Building contexts for task on rank {rank}...")

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
            The number of times each instance in a dataset is inferred on. Defaults to 1;
            can be increased for techniques like majority voting.
        TODO: update this docstring
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
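
    # Illustrative note (hypothetical input): for doc = "héllo world",
    # count_bytes(doc) is 12 ("é" encodes to two UTF-8 bytes) while
    # count_words(doc) is 2; these counts weight the rolling-perplexity metrics.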

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=random.Random(1234),
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg, although it defaults to a
            fixed-seed `random.Random(1234)` generator.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`"

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
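
    # Illustrative sketch (hypothetical strings): with description
    # "Answer the question.\n\n", one sampled example "Q: 1+1?\nA: 2", and a
    # target doc rendering to "Q: 2+2?\nA:", the returned context is
    # "Answer the question.\n\nQ: 1+1?\nA: 2\n\nQ: 2+2?\nA:".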

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns a dictionary representing the task's config.

        :returns: dict
            A dictionary of the task's configuration.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            assert self.config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self._config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self._config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)
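
        # Illustrative sketch (hypothetical task YAML, not part of this file):
        # a metric_list entry exercising the branches above might look like:
        #
        #   metric_list:
        #     - metric: exact_match
        #       aggregation: mean
        #       higher_is_better: true
        #       ignore_case: true  # extra keys are passed through as metric kwargs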

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]
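
        # Illustrative sketch (hypothetical YAML): a filter_list entry matching
        # the loop above, assuming the "regex" and "take_first" filters are
        # registered with lm_eval.filters:
        #
        #   filter_list:
        #     - name: get-answer
        #       filter:
        #         - function: regex
        #           regex_pattern: "(\\-?[0-9]+)"
        #         - function: take_first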

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.get_sampler(
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )(list(self.fewshot_docs()), self, rnd=random.Random(1234))
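
        # Illustrative sketch (hypothetical YAML): selecting a non-default
        # fewshot sampler, assuming one named "first_n" is registered with
        # lm_eval.api.samplers:
        #
        #   fewshot_config:
        #     sampler: first_n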

        if self.has_test_docs():
            self.task_docs = self.test_docs()
        elif self.has_validation_docs():
            self.task_docs = self.validation_docs()
        else:
            assert False, f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have validation or test docs!"

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]

        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = True if choice[0].isspace() else False
                delimiter_has_whitespace = (
                    True
                    if self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                    else False
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs=None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"Task '{self.config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self.config.description
        else:
            labeled_examples = self.config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)

        if self.multiple_input:
            return labeled_examples
        else:
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")
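
    # Illustrative note (hypothetical doc): with doc = {"question": "2+2?"} and
    # doc_to_text = "Q: {{question}}\nA:", the template branch above renders
    # "Q: 2+2?\nA:" via utils.apply_template.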

    def doc_to_target(self, doc: dict) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")
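
    # Illustrative note (hypothetical values): doc_to_choice accepts a feature
    # name ("choices"), a template string ("{{options}}"), a literal list
    # (["yes", "no"]), or a dict whose values are taken in order
    # ({"A": "yes", "B": "no"} -> ["yes", "no"]).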

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, self.config.generation_kwargs)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )
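
    # Illustrative note (hypothetical doc): for choices ["yes", "no"] and
    # target_delimiter " ", the multiple_choice branch above builds loglikelihood
    # requests with arguments [(ctx, " yes"), (ctx, " no")]; with acc_mutual_info
    # it additionally scores the unconditional continuations ("", "yes"), ("", "no").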

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
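
            # Illustrative note (a sketch of the aggregation step, not performed
            # here): each (loglikelihood, n) pair above is later reduced by the
            # aggregations imported at the top, e.g. weighted_perplexity computes
            # exp(-sum(ll) / sum(n)) and bits_per_byte the analogous
            # -sum(ll) / (sum(n) * ln 2).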
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    f"Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info
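
            # Illustrative note (hypothetical numbers): with lls = [-4.0, -6.0]
            # for choices ["cat", "tiger"], pred = 0, while acc_norm ranks
            # lls / completion_len = [-1.33, -1.2] and picks pred_norm = 1,
            # penalizing longer choices proportionally less.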

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

lintangsutawika's avatar
lintangsutawika committed
1132
            for metric in self._metric_fn_list.keys():
haileyschoelkopf's avatar
haileyschoelkopf committed
1133
1134
1135
1136
1137
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multipLe_target, non zero-or-1 metrics
                    scores = []
haileyschoelkopf's avatar
haileyschoelkopf committed
1138
1139
1140
1141
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        # print(gold)
                        gold = [gold]
1142
1143
1144
1145
1146
1147
1148
1149
                    if metric == "exact_match":
                        predictions = [result] * len(gold)
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=predictions,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self):
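        """Returns a dict mapping metric names to their aggregation functions."""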
        return self._aggregation_list

    def higher_is_better(self):
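        """Returns a dict mapping metric names to whether higher values are better."""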
        return self._higher_is_better


class MultipleChoiceTask(Task):
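    """A task scored by comparing per-choice loglikelihoods.

    Docs are expected to carry a list of string `choices` and the integer index
    `gold` of the correct one, e.g. (illustrative; only the `choices` and `gold`
    keys are required by this class):

        {"query": "2 + 2 = ?", "choices": ["3", "4"], "gold": 1}
    """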
    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
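        # one loglikelihood request per answer choice, each scoring
        # " <choice>" as a continuation of the shared context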
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: List[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods; discard is_greedy. TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
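        # acc_norm: length-normalize each choice's loglikelihood (by character
        # count) before taking the argmax, so longer choices are not penalized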
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
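    """Scores each document by the rolling loglikelihood of its full text;
    no context or fewshot examples are used."""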
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        assert k == 0, "Perplexity tasks do not use fewshot examples."
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Union[str, None], **kwargs):
        assert not ctx

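        # a single rolling-loglikelihood request over the full document text;
        # with no separate context, arguments holds only the target string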
        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
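        # each metric value is a (loglikelihood, weight) pair; the word/byte
        # counts serve as weights for the aggregation functions below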
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))