import abc
from dataclasses import dataclass, field, asdict

import re
import ast
import yaml
import evaluate
import random
import itertools
import functools

import datasets
import numpy as np

from typing import Union
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble

from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble
from lm_eval.api.metrics import (
    # get_metric,
    # get_aggregation,
    mean,
    weighted_perplexity,
    bits_per_byte,
)
from lm_eval.api.registry import (
    METRIC_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    OUTPUT_TYPE_REGISTRY,
    AGGREGATION_REGISTRY,
    HIGHER_IS_BETTER_REGISTRY,
    DEFAULT_AGGREGATION_REGISTRY,
)

ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "greedy_until",
]
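
# Each output type determines the request arguments that `construct_requests`
# builds further below (orientation sketch only):
#
#   loglikelihood         -> (ctx, target)             score one fixed continuation
#   loglikelihood_rolling -> (target,)                 full-document perplexity
#   multiple_choice       -> one loglikelihood request per answer choice
#   greedy_until          -> (ctx, generation_kwargs)  free-form generation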


@dataclass
class TaskConfig(dict):

    task: str = None
    group: Union[str, list] = None
    reference: str = None

    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert whether this is the same split as the one being evaluated (?)

    template_aliases: str = None
    aliases: Union[str, list] = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    use_prompt: str = None

    num_fewshot: int = 0
    batch_size: int = 1
    repeats: int = 1

    metric_list: str = None
    gold_alias: Union[Callable, str] = None
    output_type: str = "greedy_until"
    generation_kwargs: dict = None
    delimiter: str = "\n\n"
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self):
        # allow user-specified aliases so that users can
        # force prompt-compatibility for some prompt regardless of
        # field names in prompt
        if self.template_aliases is not None:
            if type(self.doc_to_text) == str:
                self.doc_to_text = self.template_aliases + self.doc_to_text

            if type(self.doc_to_target) == str:
                self.doc_to_target = self.template_aliases + self.doc_to_target

            if type(self.gold_alias) == str:
                self.gold_alias = self.template_aliases + self.gold_alias

        if self.generation_kwargs:
            assert (
                self.output_type == "greedy_until"
            ), "passed `generation_kwargs`, but not using a generation request type!"
        elif self.output_type == "greedy_until":
            # ensure that we greedily generate in absence of explicit arguments otherwise
            self.generation_kwargs = {"do_sample": False, "temperature": 0.0}

        # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?

    def __getitem__(self, item):
        return getattr(self, item)

    def to_dict(self):
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                # TODO: this should handle Promptsource template objects as a separate case?
                cfg_dict[k] = str(v)
        return cfg_dict
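
# A minimal sketch (illustrative only, not used elsewhere in this file) of how
# a TaskConfig is typically built from a parsed task YAML and re-serialized;
# the field values below are hypothetical:
#
#   cfg = TaskConfig(
#       task="example_task",
#       dataset_path="super_glue",
#       dataset_name="boolq",
#       validation_split="validation",
#       doc_to_text="{{question}}\nAnswer:",
#       doc_to_target="{{label}}",
#       output_type="multiple_choice",
#   )
#   cfg.to_dict()  # null fields dropped, callables stringified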


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., question, answer)
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ):
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig(**config) if config else TaskConfig()

        if not hasattr(self, "_filters"):
            self._filters = []
            for name, components in self._config.get(
                "filters", [["none", [["take_first", None]]]]
            ):
                filter_pipeline = build_filter_ensemble(name, components)
                self._filters.append(filter_pipeline)

        self.sampler = samplers.Sampler(
            list(self.fewshot_docs()), self, rnd=random.Random()
        )  # TODO: pass the correct docs in here

    def download(self, data_dir=None, cache_dir=None, download_mode=None):
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None):
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc, self._config.num_fewshot, rnd=random.Random()
            )
            # TODO: hardcoded for now: # of runs on each input to be 2. # TODO: we should override this if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self._config["task"], doc_id, self._config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
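
    # Illustrative behavior of the two counters above:
    #   Task.count_words("Hello world")  ->  2
    #   Task.count_bytes("héllo")        ->  6   (UTF-8 bytes, not characters)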

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :returns: str
            The fewshot context.
        """
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`"

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            labeled_examples = self.sampler.get_context(doc, num_fewshot)

            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            # if self.has_training_docs():
            #     fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            # else:
            #     if self._fewshot_docs is None:
            #         self._fewshot_docs = list(
            #             self.validation_docs()
            #             if self.has_validation_docs()
            #             else self.test_docs()
            #         )

            #     fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

            #     # get rid of the doc that's the one we're evaluating, if it's in the fewshot
            #     fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            # labeled_examples = (
            #     "\n\n".join(
            #         [
            #             self.doc_to_text(doc) + self.doc_to_target(doc)
            #             for doc in fewshotex
            #         ]
            #     )
            #     + "\n\n"
            # )

        example = self.doc_to_text(doc)
        return labeled_examples + example
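
    # Sketch of a resulting 2-shot context (illustrative; the exact delimiter
    # and formatting come from the sampler and task config):
    #
    #   "Q: <ex1>\nA: <ans1>\n\nQ: <ex2>\nA: <ans2>\n\nQ: <eval doc>\nA:"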

    def apply_filters(self):

        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self):
        """Returns a dictionary representing the task's config.

        :returns: dict
            A dictionary of the task's configuration.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (batch size, num_fewshot)
        return self._config.to_dict()


class ConfigurableTask(Task):

    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ):
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self._config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self._config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if self._config.output_type is not None:
            assert self._config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self._config.output_type

        if self._config.dataset_path is not None:
            self.DATASET_PATH = self._config.dataset_path

        if self._config.dataset_name is not None:
            self.DATASET_NAME = self._config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        _metric_list = DEFAULT_METRIC_REGISTRY[self._config.output_type]
        if self._config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = METRIC_REGISTRY[metric_name]
                self._aggregation_list[metric_name] = DEFAULT_AGGREGATION_REGISTRY[
                    metric_name
                ]
                self._higher_is_better[metric_name] = HIGHER_IS_BETTER_REGISTRY[
                    metric_name
                ]
        else:
            for metric_config in self._config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key not in ["metric", "aggregation", "higher_is_better"]
                }
                try:
                    self._metric_fn_list[metric_name] = METRIC_REGISTRY[metric_name]
                except Exception:
                    eval_logger.warning(
                        f"Metric {metric_name} not found in the registry, "
                        "searching https://huggingface.co/evaluate-metric"
                    )
                    try:
                        metric_object = evaluate.load(metric_name)
                        self._metric_fn_list[metric_name] = metric_object
                        self._metric_fn_kwargs[metric_name] = kwargs

                    except Exception:
                        raise ValueError(
                            f"{metric_name} not found in the evaluate library! "
                            "Please check https://huggingface.co/evaluate-metric"
                        )

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if type(agg_name) == str:
                        self._aggregation_list[metric_name] = AGGREGATION_REGISTRY[
                            agg_name
                        ]
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:

                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = DEFAULT_AGGREGATION_REGISTRY[metric_name]
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but aggregation is not. "
                        f"Using default aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but higher_is_better is not. "
                        f"Using default higher_is_better={HIGHER_IS_BETTER_REGISTRY[metric_name]}"
                    )
                    self._higher_is_better[metric_name] = HIGHER_IS_BETTER_REGISTRY[
                        metric_name
                    ]

        self.download(self._config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self._config.filter_list is not None:
            self._filters = []
            for filter_config in self._config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]
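
        # The `filter_list` consumed above is expected to look like the
        # following (illustrative YAML; filter names and kwargs hypothetical):
        #
        #   filter_list:
        #     - name: "strict-match"
        #       filter:
        #         - function: "regex"
        #           regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
        #         - function: "take_first"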

        if self._config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self._config.use_prompt}")
            self.prompt = get_prompt(
                self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.Sampler(
                list(self.fewshot_docs()), self, rnd=random.Random()
            )

    def download(self, dataset_kwargs=None):

        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )
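
        # `dataset_kwargs` is forwarded verbatim to `datasets.load_dataset`,
        # e.g. (hypothetical) {"data_files": {"validation": "val.jsonl"}}
        # or {"revision": "main"}.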

    def has_training_docs(self):
        if self._config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self):
        if self._config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self):
        if self._config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self):
        if self._config.training_split is not None:
            return self.dataset[self._config.training_split]

    def validation_docs(self):
        if self._config.validation_split is not None:
            return self.dataset[self._config.validation_split]

    def test_docs(self):
        if self._config.test_split is not None:
            return self.dataset[self._config.test_split]

    def fewshot_docs(self):
        if self._config.fewshot_split is not None:
            return self.dataset[self._config.fewshot_split]
        else:
            if self._config.num_fewshot > 0:
                eval_logger.warning(
                    f"Task '{self._config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    def should_decontaminate(self):
        return self._config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self._config.should_decontaminate:
            return utils.apply_template(self._config.doc_to_decontamination_query, doc)

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):

        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self._config.doc_to_text

        if type(doc_to_text) == str:
            return utils.apply_template(doc_to_text, doc)
        elif callable(doc_to_text):
            return doc_to_text(doc)
        elif hasattr(doc_to_text, "apply"):
            return doc_to_text.apply(doc)[0]
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")
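
    # `utils.apply_template` renders a Jinja-style template against the doc:
    # illustratively, a doc_to_text of "{{question}}\nAnswer:" applied to
    # {"question": "2+2?"} yields "2+2?\nAnswer:". Promptsource-style prompt
    # objects are instead handled through their `.apply(doc)` method.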

    def doc_to_target(self, doc):

        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self._config.doc_to_target

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def gold_alias(self, doc):
        # TODO: reevaluate if we need this. implemented to have a
        # processed version of answer to put into gsm8k exact_match scoring as ref.
        if self._config.gold_alias is not None:
            doc_to_target = self._config.gold_alias
        else:
            # doc_to_target = self._config.doc_to_target
            return self.doc_to_target(doc)

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError(f"Unexpected gold_alias type: {type(doc_to_target)}")

    def construct_requests(self, doc, ctx, **kwargs):

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            # we pass the user-defined answer_choices var (in aliases) and translate the result to a Python list.
            # TODO: any cleaner way to do this?
            choices = ast.literal_eval(
                utils.apply_template(
                    self._config.template_aliases + "{{answer_choices}}", doc
                )
            )
            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=(ctx, " {}".format(choice)),
                    idx=i,
                    **kwargs,
                )
                for i, choice in enumerate(choices)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "greedy_until":
            arguments = (ctx, self._config.generation_kwargs)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):

        # if callable(self._config.process_results):
        #     return self._config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":

            lls, is_greedy = zip(*results)
            if self._config.gold_alias is not None:
                gold = int(self.gold_alias(doc))
            else:
                gold = int(self.doc_to_target(doc))

            pred = np.argmax(lls)
            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = ast.literal_eval(
                utils.apply_template(
                    self._config.template_aliases + "{{answer_choices}}", doc
                )
            )
            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            acc = 1.0 if np.argmax(lls) == gold else 0.0
            completion_len = np.array([float(len(i)) for i in choices])
            acc_norm = 1.0 if np.argmax(lls / completion_len) == gold else 0.0

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
            }

            # TODO: set which normalization metrics should be reported, and calculate them
            if "exact_match" in self._metric_fn_list.keys():
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                is_greedy = is_greedy[gold]  # take value for the gold answer
                result_dict["exact_match"] = int(is_greedy)

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "greedy_until":

            if self._config.gold_alias is not None:
                gold = self.gold_alias(doc)
            else:
                gold = self.doc_to_target(doc)

            for key, result in zip(self._metric_fn_list.keys(), results):
                _dict = self._metric_fn_list[key].compute(
                    references=[gold],
                    predictions=[result],
                    **self._metric_fn_kwargs.get(key, {}),
                )

                result_dict = {**result_dict, **_dict}
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'greedy_until', or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):

    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc):
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc, ctx, **kwargs):
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc, results):
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self):
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self):
        return {
            "acc": mean,
            "acc_norm": mean,
        }
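
    # Docs consumed by this class are expected to carry the fields used above,
    # e.g. (illustrative): {"query": "...", "choices": ["yes", "no"], "gold": 0}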


class PerplexityTask(Task):

    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self):
        return False

    def fewshot_examples(self, k, rnd):
        assert k == 0
        return []

    def fewshot_context(self, doc, num_fewshot, rnd=None):
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."
        assert (
            rnd is not None
        ), "A `random.Random` generator argument must be provided to `rnd`."

        return ""

    def higher_is_better(self):
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc):
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc, ctx, **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self):
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc):
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
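
# For reference: the aggregations wired up above reduce (loglikelihood, weight)
# pairs roughly as follows (sketch; see lm_eval.api.metrics for the canonical
# definitions):
#   word_perplexity = exp(-sum(lls) / sum(word_counts))
#   byte_perplexity = exp(-sum(lls) / sum(byte_counts))
#   bits_per_byte   = -sum(lls) / (sum(byte_counts) * ln(2))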