import abc
from dataclasses import dataclass, field, asdict

import re
import ast
import yaml
import evaluate
import random
import itertools
import functools
from tqdm import tqdm

import datasets
import numpy as np

from typing import Union, List, Any, Tuple, Literal
from collections.abc import Callable

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance
from lm_eval.api.filter import FilterEnsemble

from lm_eval.logger import eval_logger
from lm_eval.prompts import get_prompt
from lm_eval.filters import build_filter_ensemble
from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
    bits_per_byte,
    metric_max_over_ground_truths,
)
from lm_eval.api.registry import (
    get_metric,
    get_aggregation,
    get_default_aggregation,
    is_higher_better,
    DEFAULT_METRIC_REGISTRY,
    OUTPUT_TYPE_REGISTRY,
    AGGREGATION_REGISTRY,
)

ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "greedy_until",
]


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: str = None
    group: Union[str, list] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: str = None
    dataset_name: str = None
    dataset_kwargs: dict = None
    training_split: str = None
    validation_split: str = None
    test_split: str = None
    fewshot_split: str = None  # TODO: assert that this is not None if num_fewshot > 0; also check it is not the same split being evaluated on (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Callable = None
    doc_to_text: Union[Callable, str] = None
    doc_to_target: Union[Callable, str] = None
    doc_to_choice: Union[Callable, str, dict, list] = None
    gold_alias: Union[Callable, str] = None
    process_results: Union[Callable, str] = None
    use_prompt: str = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    # runtime configuration options
    num_fewshot: int = 0
    # scoring options
    metric_list: str = None
    output_type: str = "greedy_until"
    generation_kwargs: dict = None
    repeats: int = 1
    filter_list: Union[str, list] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: str = None

    metadata: str = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "greedy_until":
                eval_logger.warning(
                    "passed `generation_kwargs`, but not using `output_type: greedy_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "greedy_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": None
                    if self.fewshot_delimiter is None
                    else [self.fewshot_delimiter],
                    "do_sample": False,
                    "temperature": 0.0,
                }
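
        # Illustrative sketch of the task-YAML shape this normalization
        # supports (a hedged example, not taken from a real task file; when
        # `until` is omitted it falls back to `[fewshot_delimiter]` above):
        #
        #   output_type: greedy_until
        #   generation_kwargs:
        #     temperature: 0.0
        #     do_sample: false
        #     until:
        #       - "\n\n"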

        # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self):
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif isinstance(v, Callable):
                # TODO: this should handle Promptsource template objects as a separate case?
                cfg_dict[k] = str(v)
        return cfg_dict
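
# Illustrative sketch (hedged; all field values below are hypothetical) of
# constructing a TaskConfig directly in Python and dumping it with to_dict():
#
#   cfg = TaskConfig(
#       task="example_task",
#       dataset_path="super_glue",
#       dataset_name="boolq",
#       validation_split="validation",
#       doc_to_text="{{question}}",
#       doc_to_target="{{label}}",
#       doc_to_choice=["no", "yes"],
#       output_type="multiple_choice",
#   )
#   cfg["num_fewshot"] = 2      # __setitem__ forwards to setattr
#   print(cfg.to_dict())        # None-valued fields are omitted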


class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., question, answer)
    """

    VERSION = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: str = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: str = None

    OUTPUT_TYPE: str = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config=None,
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs = None
        self._fewshot_docs = None
        self._instances = None

        self._config = TaskConfig(**config) if config else TaskConfig()

        if not hasattr(self, "_filters"):
            self._filters = []
            for name, components in self._config.get(
                "filters", [["none", [["take_first", None]]]]
            ):
                filter_pipeline = build_filter_ensemble(name, components)
                self._filters.append(filter_pipeline)

        self.sampler = samplers.Sampler(
            list(self.fewshot_docs()), self, rnd=random.Random(1234)
        )

    def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self):
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                "has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self):
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc) -> None:
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(self, limit=None, rank=None, world_size=None) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""
        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        eval_logger.info(
            f"Building contexts for task '{self._config.task}' on rank {rank}..."
        )

        instances = []
        for doc_id, doc in utils.create_iterator(
            enumerate(docs), rank, world_size, limit
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                self._config.num_fewshot,
            )

            # TODO: we should override self._config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self._config["task"], doc_id, self._config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.extend(inst)

        self._instances = instances
        assert len(self._instances) != 0, "task.build_requests() did not find any docs!"

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
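
    # Quick illustration: count_bytes("héllo") == 6 under UTF-8 ("é" is two
    # bytes), and count_words("a b  c") == 3, since runs of whitespace count
    # as a single boundary.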

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :returns: str
            The fewshot context.
        """

        if num_fewshot == 0:
            # always prepend the (possibly empty) task description
            labeled_examples = self._config.description
        else:
            labeled_examples = self._config.description + self.sampler.get_context(
                doc, num_fewshot
            )

        example = self.doc_to_text(doc)
        if type(example) == str:
            return labeled_examples + example
        elif type(example) == list:
            return [labeled_examples + ex for ex in example]
        elif type(example) == int:
            if self._config.doc_to_choice is not None:
                choices = self.doc_to_choice(doc)
                return labeled_examples + choices[example]
            else:
                return labeled_examples + str(example)
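
    # Illustrative layout of the string this method returns for num_fewshot=2
    # (a hedged sketch; "\n\n" stands for the configured fewshot_delimiter):
    #
    #   <description><example 1>\n\n<example 2>\n\n<doc_to_text(doc)>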

    def apply_filters(self):
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns a dictionary representing the task's config.

        :returns: dict
            The task's configuration, as a dictionary.
        """
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self._config.to_dict()


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self, data_dir=None, cache_dir=None, download_mode=None, config: dict = None
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self._config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self._config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if self._config.output_type is not None:
            assert self._config.output_type in ALL_OUTPUT_TYPES
            self.OUTPUT_TYPE = self._config.output_type

        if self._config.dataset_path is not None:
            self.DATASET_PATH = self._config.dataset_path

        if self._config.dataset_name is not None:
            self.DATASET_NAME = self._config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        _metric_list = DEFAULT_METRIC_REGISTRY[self._config.output_type]
        if self._config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._aggregation_list[metric_name] = get_default_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self._config.metric_list:
                assert "metric" in metric_config
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key not in ["metric", "aggregation", "higher_is_better"]
                }

                if self._config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(metric_name)
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if type(agg_name) == str:
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_default_aggregation(metric_name)
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self._config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self._config.filter_list is not None:
            self._filters = []
            for filter_config in self._config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]
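
        # Illustrative shape of `filter_list` in a task YAML (a hedged sketch;
        # the name and regex below are hypothetical, and "take_first" is the
        # default applied above):
        #
        #   filter_list:
        #     - name: "strict-match"
        #       filter:
        #         - function: "regex"
        #           regex_pattern: "answer is (.*)"
        #         - function: "take_first"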

        if self._config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self._config.use_prompt}")
            self.prompt = get_prompt(
                self._config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.sampler = samplers.Sampler(
                list(self.fewshot_docs()), self, rnd=random.Random(1234)
            )

        if self.has_test_docs():
            docs = self.test_docs()
        elif self.has_validation_docs():
            docs = self.validation_docs()
        else:
            assert (
                False
            ), f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"

        # Test One Doc
        self.features = list(docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self._config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if type(test_choice) is not list:
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if type(test_text) is int:
                self.multiple_input = num_choice
        else:
            test_choice = None

        if type(test_target) is list:
            self.multiple_target = len(test_target)
        else:
            if (type(test_target) is int) and (test_choice is not None):
                test_target = [test_choice[test_target]]
            else:
                test_target = [test_target]

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = test_target

        for choice in check_choices:
            choice_has_whitespace = " " in choice
            delimiter_has_whitespace = " " in self._config.target_delimiter

            if delimiter_has_whitespace and choice_has_whitespace:
                eval_logger.warning(
                    f'Both target_delimiter and target choice: "{choice}" have whitespace'
                )
            elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                eval_logger.warning(
                    f'Neither target_delimiter nor target choice: "{choice}" has whitespace; '
                    "ignore this if the language you are evaluating does not use whitespace"
                )

    def download(self, dataset_kwargs=None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self._config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self._config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self._config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self._config.process_docs is not None:
                return self._config.process_docs(
                    self.dataset[self._config.training_split]
                )
            return self.dataset[self._config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self._config.process_docs is not None:
                return self._config.process_docs(
                    self.dataset[self._config.validation_split]
                )
            return self.dataset[self._config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self._config.process_docs is not None:
                return self._config.process_docs(self.dataset[self._config.test_split])
            return self.dataset[self._config.test_split]

    def fewshot_docs(self):
        if self._config.fewshot_split is not None:
            return self.dataset[self._config.fewshot_split]
        else:
            if self._config.num_fewshot > 0:
                eval_logger.warning(
                    f"Task '{self._config.task}': "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    def should_decontaminate(self):
        return self._config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self._config.should_decontaminate:
            if self._config.doc_to_decontamination_query in self.features:
                return doc[self._config.doc_to_decontamination_query]
            else:
                return ast.literal_eval(
                    utils.apply_template(self._config.doc_to_decontamination_query, doc)
                )

    def _process_doc(self, doc):
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self._config.doc_to_text

        if type(doc_to_text) == int:
            return doc_to_text
        elif type(doc_to_text) == str:
            if doc_to_text in self.features:
                # if self._config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit():
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self._config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: dict) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self._config.doc_to_target

        if type(doc_to_target) == int:
            return doc_to_target
        elif type(doc_to_target) == str:
            if doc_to_target in self.features:
                # if self._config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit():
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    return ast.literal_eval(target_string)
                else:
                    return target_string
        elif type(doc_to_target) == list:
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self._config.fewshot_delimiter
        else:
            raise TypeError

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self._config.doc_to_choice is None:
            raise ValueError("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self._config.doc_to_choice

        if type(doc_to_choice) == str:
            return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif type(doc_to_choice) == list:
            return doc_to_choice
        elif type(doc_to_choice) == dict:
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError
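
    # Accepted `doc_to_choice` forms handled above (illustrative sketch; the
    # field names and values are hypothetical):
    #   doc_to_choice: "{{choices}}"            # template string -> evaluated list
    #   doc_to_choice: ["yes", "no"]            # literal list
    #   doc_to_choice: {"0": "yes", "1": "no"}  # dict -> list of its values
    #   doc_to_choice: !function utils.choices  # callable taking the doc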

    def gold_alias(self, doc):
        # returns a version of the gold target answer to a document,
        # which should be passed into metric for scoring as the ground truth.

        # in multiple_choice tasks, this should be castable to an int corresponding to the index
        # within the answer choices, while doc_to_target is the string version of {{answer_choices[gold]}}.
        if self._config.gold_alias is not None:
            doc_to_target = self._config.gold_alias
        else:
            return self.doc_to_target(doc)

        if type(doc_to_target) == str:
            return utils.apply_template(doc_to_target, doc)
        elif callable(doc_to_target):
            return doc_to_target(doc)
        elif hasattr(doc_to_target, "apply"):
            return doc_to_target.apply(doc)[1]
        else:
            raise TypeError

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self._config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [(ctx, f"{target_delimiter}{cont}") for ctx in choices]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will increase runtime by up to ~2x.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
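                # e.g. if log P(choice|ctx) = -2.0 and log P(choice) = -5.0, the
                # mutual-information score for that choice is -2.0 - (-5.0) = 3.0.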
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "greedy_until":
            arguments = (ctx, self._config.generation_kwargs)

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):
        if callable(self._config.process_results):
            return self._config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                assert len(lls_unconditional) == len(choices)
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)
                if type(gold) is str:
                    gold = choices.index(gold)

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold])

            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "greedy_until":
            gold = self.doc_to_target(doc)
            if self._config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            else:
                gold = str(gold)

            for key, result in zip(self._metric_fn_list.keys(), results):
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    for gold_option in gold:
                        res = self._metric_fn_list[key](
                            references=[gold_option],
                            predictions=[result],
                            **self._metric_fn_kwargs[key],
                        )
                        if isinstance(res, dict):
                            # TODO: this handles the case where HF evaluate returns a dict.
                            res = res[key]
                        scores.append(res)
                    if any(scores):
                        result_score = 1.0
                    else:
                        result_score = 0.0
                else:
                    result_score = self._metric_fn_list[key](
                        references=[gold],
                        predictions=[result],
                        **self._metric_fn_kwargs[key],
                    )

                if isinstance(result_score, dict):
                    result_dict.update(result_score)
                else:
                    result_dict[key] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'greedy_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self):
        return self._aggregation_list

    def higher_is_better(self):
        return self._higher_is_better


class MultipleChoiceTask(Task):
    OUTPUT_TYPE: str = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: List[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }
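
# Minimal sketch of a MultipleChoiceTask subclass (illustrative only; the
# dataset path and the "query"/"choices"/"gold" fields are assumptions,
# chosen to match what doc_to_target and process_results above expect):
#
#   class ExampleMCTask(MultipleChoiceTask):
#       VERSION = 0
#       DATASET_PATH = "org/example_dataset"  # hypothetical HF dataset
#
#       def has_training_docs(self):
#           return False
#
#       def has_validation_docs(self):
#           return True
#
#       def has_test_docs(self):
#           return False
#
#       def validation_docs(self):
#           return map(self._process_doc, self.dataset["validation"])
#
#       def _process_doc(self, doc):
#           return {"query": doc["question"], "choices": doc["options"], "gold": doc["label"]}
#
#       def doc_to_text(self, doc):
#           return doc["query"]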


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        assert k == 0
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        assert (
            num_fewshot == 0
        ), "The number of fewshot examples must be 0 for perplexity tasks."

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Union[str, None], **kwargs):
        assert not ctx

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: float) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))