import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    download_dataset: Optional[Callable] = None
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[str] = (
        None  # TODO: assert that this not None if num_fewshot > 0. (?) assert if this is same split as one evaluating (?)
    )
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_image: Union[Callable, str] = None
    unsafe_code: bool = False
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    gen_prefix: Optional[str] = None
    metadata: Optional[dict] = (
        None  # by default, not used in the code. allows for users to pass arbitrary info to tasks
    )

    def __post_init__(self) -> None:
Lintang Sutawika's avatar
Lintang Sutawika committed
103
        if self.generation_kwargs is not None:
104
            if self.output_type != "generate_until":
105
                eval_logger.warning(
106
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
Lintang Sutawika's avatar
Lintang Sutawika committed
107
108
109
110
111
112
113
114
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
115
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
Lintang Sutawika's avatar
Lintang Sutawika committed
116
        else:
117
            if self.output_type == "generate_until":
Lintang Sutawika's avatar
Lintang Sutawika committed
118
119
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
120
121
122
123
124
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
Lintang Sutawika's avatar
Lintang Sutawika committed
125
126
                    "do_sample": False,
                }

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)

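# Illustrative sketch only (not part of the library API): a TaskConfig is typically
# built from a task's YAML configuration; the field names below come from the
# dataclass above, while the specific values are hypothetical.
#
#   TaskConfig(
#       task="example_task",
#       dataset_path="super_glue",
#       dataset_name="boolq",
#       output_type="multiple_choice",
#       doc_to_text="{{question}}",
#       doc_to_target="label",
#       doc_to_choice=["no", "yes"],
#       metric_list=[{"metric": "acc"}],
#   )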

class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        (question, answer)
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[random.Random] = (
            None  # purposely induce errors in case of improper usage
        )

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            if self.config.get("num_fewshot", 0) > 0:
                eval_logger.warning(
                    f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                    ", using test_docs as fewshot_docs but this is not recommended."
                )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    # not an abstractmethod because not every language-only task has to implement this
    def doc_to_image(self, doc):
        raise NotImplementedError

    def doc_to_prefix(self, doc):
        return ""

    def build_all_requests(
        self,
        *,
        limit: Union[int, None] = None,
        rank: int = 0,
        world_size: int = 1,
        cache_requests: bool = False,
        rewrite_requests_cache: bool = False,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        tokenizer_name: str = "",
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
        cache_key += "-chat_template" if apply_chat_template else ""
        cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else ""
        cache_key += (
            f"-system_prompt_hash{utils.hash_string(system_instruction)}"
            if system_instruction is not None
            else ""
        )
        cache_key += f"-tokenizer{tokenizer_name}"

        cached_instances = load_from_cache(file_name=cache_key, cache=cache_requests)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
                system_instruction,
                apply_chat_template,
                fewshot_as_multiturn,
                chat_template,
                gen_prefix=self.doc_to_prefix(doc),
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
                apply_chat_template=apply_chat_template,
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None, description=None, **kwargs):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
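        # The assembled context below is: the optional description, then the
        # sampled fewshot examples joined by "\n\n" (each rendered as
        # doc_to_text + doc_to_target), then the query text of the current doc.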
        return description + labeled_examples + example

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

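    # Note: doc_iterator below shards the evaluation docs across processes; with
    # `world_size` ranks each rank sees roughly every `world_size`-th
    # (doc_id, doc) pair starting at its `rank` offset, optionally capped by `limit`.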
    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.doc_to_image is not None:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.unsafe_code is not False:
            self.UNSAFE_CODE = True

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

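        # Illustrative shape of a user-provided `metric_list` entry handled below
        # (the metric and aggregation names here are hypothetical examples):
        #   {"metric": "exact_match", "aggregation": "mean", "higher_is_better": True}
        # Any extra keys on the entry are forwarded to the metric function as kwargs.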
        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

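        # Illustrative shape of `filter_list` consumed below (names are hypothetical):
        #   [{"name": "my_filter", "filter": [{"function": "take_first"}]}]
        # Each entry becomes one FilterEnsemble; extra keys on a function dict are
        # passed to that filter as kwargs.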
        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = True if choice[0].isspace() else False
                delimiter_has_whitespace = (
                    True
                    if self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                    else False
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(
        self, dataset_kwargs: Optional[Dict[str, Any]] = None, **kwargs
    ) -> None:
        if isinstance(self.config.download_dataset, Callable):
            self.dataset = self.config.download_dataset(
                **self.config.metadata,
                **self.config.dataset_kwargs
                if self.config.dataset_kwargs is not None
                else {},
            )
        else:
            self.dataset = datasets.load_dataset(
                path=self.DATASET_PATH,
                name=self.DATASET_NAME,
                **dataset_kwargs if dataset_kwargs is not None else {},
            )

    def has_training_docs(self) -> bool:
        if self.config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self) -> bool:
        if self.config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self) -> bool:
        if self.config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        elif (
            self.config.fewshot_config is not None
            and self.config.fewshot_config.get("samples", None) is not None
        ):
            if isinstance(self.config.fewshot_config["samples"], list):
                return self.config.fewshot_config["samples"]
            elif callable(self.config.fewshot_config["samples"]):
                return self.config.fewshot_config["samples"]()
            else:
                raise Exception(
                    "`fewshot_config['samples']` was incorrectly defined in the configuration. It should be either a list of samples as a dict, or function returning this list."
                )
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

    @staticmethod
    def append_target_question(
        labeled_examples: List[Dict[str, str]],
        question: str,
        fewshot_as_multiturn: bool = False,
        gen_prefix: Optional[str] = None,
    ) -> None:
        """Adds a target question to the labeled examples list.
        If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry.
        Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant.
        """
        if not fewshot_as_multiturn:
            # if no messages or last message is system, append as new user entry
            if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system":
                labeled_examples.append({"role": "user", "content": question})
            # if last message is user, append to it to avoid two user messages in a row
            else:
                labeled_examples[-1]["content"] += question
        else:
            # if fewshot_as_multiturn is True, append as next user entry (last is always assistant)
            labeled_examples.append({"role": "user", "content": question})
        if gen_prefix:
            labeled_examples.append({"role": "assistant", "content": gen_prefix})
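    # Illustrative example (hypothetical values) of the mutation performed by
    # append_target_question with fewshot_as_multiturn=False and gen_prefix="Answer:":
    #   before: [{"role": "user", "content": "Q1 A1\n\n"}]
    #   after:  [{"role": "user", "content": "Q1 A1\n\nQ2"},
    #            {"role": "assistant", "content": "Answer:"}]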

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: dict,
        num_fewshot: int,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        gen_prefix: Optional[str] = None,
    ) -> Union[str, List[str]]:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param  system_instruction: str
            System instruction to be applied to the prompt.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param fewshot_as_multiturn: bool
            Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
        :param chat_template:
            callable (from lm.apply_chat_template) that takes in a list[Dict] chat transcript and renders it into a string.
        :returns: str
            The fewshot context.
        """
        if apply_chat_template:
            labeled_examples = []
        else:
            labeled_examples = ""

        # get task description
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        # create system prompt based on the provided system instruction and description
        if system_instruction is not None and description:
            system_prompt = (
                f"{system_instruction}{self.sampler.fewshot_delimiter}{description}"
            )
        elif system_instruction is not None:
            system_prompt = system_instruction
        elif description:
            system_prompt = description
        else:
            system_prompt = ""

        # add system prompt if specified
        if system_prompt:
            if apply_chat_template:
                labeled_examples.append({"role": "system", "content": system_prompt})
            else:
                labeled_examples = system_prompt
        # if few-shot - append examples after the system prompt
        if num_fewshot > 0:
            if apply_chat_template:
                labeled_examples.extend(
                    self.sampler.get_chat_context(
                        doc,
                        num_fewshot,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                )
            else:
                labeled_examples += self.sampler.get_context(
                    doc, num_fewshot, gen_prefix=gen_prefix
                )

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if self.multiple_input:
                # TODO: append prefill?
                return chat_template(labeled_examples)
            if isinstance(example, str):
                self.append_target_question(
                    labeled_examples,
                    example,
                    fewshot_as_multiturn,
                    gen_prefix=gen_prefix,
                )
            # for loglikelihood create a list of questions with appended choices
            elif isinstance(example, list):
                labeled_examples_list = []
                # copy chat history for each example and append the answer
                for ex in example:
                    chat = deepcopy(labeled_examples)
                    self.append_target_question(
                        chat,
                        ex,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                    # TODO: append prefill?
                    labeled_examples_list.append(
                        chat_template(
                            chat,
                            add_generation_prompt=False if gen_prefix else True,
                        )
                    )
                return labeled_examples_list
            # if example is an integer, append the choice or convert to string
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    self.append_target_question(
                        labeled_examples,
                        choices[example],
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                else:
                    self.append_target_question(
                        labeled_examples,
                        str(example),
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                # return lm.apply_chat_template(labeled_examples)
            return chat_template(
                labeled_examples,
                add_generation_prompt=False if gen_prefix else True,
            )
        else:
            prefix = (
                self.config.target_delimiter + gen_prefix
                if gen_prefix is not None
                else ""
            )
            if self.multiple_input:
                return labeled_examples
            if isinstance(example, str):
                return labeled_examples + example + prefix
1173
            elif isinstance(example, list):
Baber Abbasi's avatar
Baber Abbasi committed
1174
                return [labeled_examples + ex + prefix for ex in example]
1175
1176
1177
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
Baber Abbasi's avatar
Baber Abbasi committed
1178
                    return labeled_examples + choices[example] + prefix
1179
                else:
Baber Abbasi's avatar
Baber Abbasi committed
1180
                    return labeled_examples + str(example) + prefix
lintangsutawika's avatar
lintangsutawika committed
1181

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc: dict):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc, doc_to_text=None):
        if self.prompt is not None:
            doc_to_text = self.prompt
        elif doc_to_text is not None:
            doc_to_text = doc_to_text
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_text type: {type(doc_to_text)}")

    def doc_to_target(self, doc: Mapping, doc_to_target=None) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        elif doc_to_target is not None:
            doc_to_target = doc_to_target
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError

    def doc_to_choice(self, doc: Any, doc_to_choice=None) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif doc_to_choice is not None:
            doc_to_choice = doc_to_choice
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError

    def doc_to_image(self, doc: Any, doc_to_image=None) -> Union[int, str, list]:
        if doc_to_image is not None:
            doc_to_image = doc_to_image
        elif self.config.doc_to_image is not None:
            doc_to_image = self.config.doc_to_image
        else:
            return None

        if isinstance(doc_to_image, list):
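            # resolve each listed feature recursively and keep only those that yield an image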
            image_feature = [
                self.doc_to_image(doc, feature) for feature in doc_to_image
            ]
            return [feature for feature in image_feature if feature is not None]
        elif isinstance(doc_to_image, str):
            if doc_to_image in self.features:
                return doc[doc_to_image]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_image, doc))
        elif callable(doc_to_image):
            return doc_to_image(doc)
        else:
            return None

    def doc_to_prefix(self, doc):
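        # gen_prefix may name a dataset column or be a template string applied to the doc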
        if (gen_prefix := self.config.gen_prefix) is not None:
            if gen_prefix in self.features:
                return doc[gen_prefix]
            else:
                return utils.apply_template(gen_prefix, doc)
        return None

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
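        """Builds the request Instance(s) for a single doc from its formatted context `ctx`."""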
        apply_chat_template = kwargs.pop("apply_chat_template", False)

        aux_arguments = None

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if apply_chat_template:
                target_delimiter = ""
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            # TODO: we should raise a warning telling users this will increase runtime by up to ~2x.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                aux_arguments = [("", f"{choice}") for choice in choices]

                arguments.extend(aux_arguments)

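        # generation requests carry the context plus a copy of the task's generation kwargs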
        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        multimodal_arg = {}
        if (
            self.config.doc_to_image
        ):  # TODO: ensure that non-multimodal tasks aren't getting visual args
            multimodal_arg = {
                **multimodal_arg,
                **{"visual": self.doc_to_image(doc)},
            }

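        # if visual inputs are present, attach them as a trailing dict on each argument tuple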
        if bool(multimodal_arg):
            if isinstance(arguments, list):
                arguments = [arg + (multimodal_arg,) for arg in arguments]
            else:
                arguments = arguments + (multimodal_arg,)

        if self.OUTPUT_TYPE == "multiple_choice":
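            # each choice (and any unconditional extra argument) becomes its own loglikelihood request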
            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]

            return request_list

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=arguments,
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    f"Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

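            # when mutual information is requested, rescore with conditional minus unconditional loglikelihoods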
            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            # TODO: handle this better
            elif type(gold) is not type(result) and not (
                "bypass" in self._metric_fn_list.keys() or isinstance(result, list)
            ):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        # print(gold)
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        # This allows for multiple metrics to be returned from the same function
                        for k, v in result_score.items():
                            result_dict[k] = v
                        return result_dict
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Any:
        return getattr(self.config, "task", None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
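    """Task whose requests score each option in `doc["choices"]` via per-choice loglikelihood."""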
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))