import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")

@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[
        str
    ] = None  # TODO: assert that this is not None if num_fewshot > 0. (?) assert if this is the same split as the one being evaluated (?)
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[
        dict
    ] = None  # by default, not used in the code. allows for users to pass arbitrary info to tasks

    def __post_init__(self) -> None:
        if self.group is not None:
            eval_logger.warning(
                "A task YAML file was found to contain a `group` key. Groups which provide aggregate scores over several subtasks now require a separate config file--if not aggregating, you may want to use the `tag` config option instead within your config. Setting `group` within a TaskConfig will be deprecated in v0.4.4. Please see https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md for more information."
            )

            if self.tag is None:
                self.tag = self.group
            else:
                raise ValueError(
                    "Got both a `group` and `tag` entry within a TaskConfig. Please use one or the other--`group` values will be deprecated in v0.4.4."
                )

        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }

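    # Illustrative note (not part of the original config logic): with the defaults
    # above, a `generate_until` task that supplies no `generation_kwargs` ends up
    # with greedy decoding that stops at the fewshot delimiter, i.e. roughly:
    #
    #   generation_kwargs = {"until": ["\n\n"], "do_sample": False}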
    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.

        Used for dumping results alongside full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)

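# A minimal sketch of constructing a TaskConfig directly (hypothetical task name
# and dataset path; most users define these fields in a task YAML instead):
#
#   cfg = TaskConfig(
#       task="example_qa",
#       dataset_path="example_org/example_qa_dataset",
#       test_split="test",
#       output_type="generate_until",
#       doc_to_text="{{question}}",
#       doc_to_target="{{answer}}",
#       metric_list=[{"metric": "exact_match"}],
#   )
#   assert cfg["output_type"] == "generate_until"  # dict-style access via __getitem__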

class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        a tuple such as (question, answer).
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        self.fewshot_rnd: Optional[
            random.Random
        ] = None  # purposely induce errors in case of improper usage

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle.
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle.
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle.
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle.
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
        system_instruction=None,
        apply_chat_template=False,
        fewshot_as_multiturn=False,
        lm=None,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
        cache_key += "-chat_template" if apply_chat_template else ""
        cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else ""
        cache_key += (
            f"-system_prompt_hash{utils.hash_string(system_instruction)}"
            if system_instruction is not None
            else ""
        )
        cache_key += f"-tokenizer{lm.tokenizer_name}" if apply_chat_template else ""

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context  # TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
                system_instruction,
                apply_chat_template,
                fewshot_as_multiturn,
                lm,
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))

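    # For example (sketch): count_bytes("héllo") == 6 because "é" is two bytes in
    # UTF-8, while count_words("a b  c") == 3 since runs of whitespace split once.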
    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example

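    # Sketch of the resulting layout: in this base-class path the sampled examples
    # are joined with "\n\n", so the returned string is simply
    #
    #   <description><shot_1 text+target>\n\n<shot_2 text+target>\n\n...<doc text>
    #
    # with no trailing target for the final, evaluated document.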
    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances."""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

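    # Example usage (illustrative): force a task to be scored with a single
    # registered metric, discarding whatever its config declared:
    #
    #   task.override_metric("exact_match")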
    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator

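    # Example (sketch): under a hypothetical world_size of 2, rank 0 receives
    # docs 0, 2, 4, ... and rank 1 receives docs 1, 3, 5, ..., assuming
    # `utils.create_iterator` strides over the enumerated docs:
    #
    #   for doc_id, doc in task.doc_iterator(rank=0, world_size=2):
    #       ...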

class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

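        # An entry in `metric_list` is a dict naming a registered metric plus
        # optional overrides, e.g. (illustrative):
        #   {"metric": "exact_match", "aggregation": "mean", "higher_is_better": True}
        # Any other keys are forwarded to the metric function as kwargs.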
        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

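        # A `filter_list` entry pairs a name with a pipeline of filter functions,
        # each a dict whose "function" key names a registered filter and whose
        # remaining keys are its kwargs, e.g. (illustrative, hypothetical pattern):
        #   [{"name": "strict-match",
        #     "filter": [{"function": "regex", "regex_pattern": r"\d+"},
        #                {"function": "take_first"}]}]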
        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]

        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = True if choice[0].isspace() else False
                delimiter_has_whitespace = (
                    True
                    if self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                    else False
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        if self.config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self) -> bool:
        if self.config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self) -> bool:
        if self.config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        elif (
            self.config.fewshot_config is not None
            and self.config.fewshot_config.get("samples", None) is not None
        ):
            if isinstance(self.config.fewshot_config["samples"], list):
                return self.config.fewshot_config["samples"]
            elif callable(self.config.fewshot_config["samples"]):
                return self.config.fewshot_config["samples"]()
            else:
                raise Exception(
                    "`fewshot_config['samples']` was incorrectly defined in the configuration. It should be either a list of samples as a dict, or function returning this list."
                )
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()

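    # Few-shot examples can also be supplied inline via `fewshot_config`, e.g.
    # (illustrative):
    #   {"sampler": "first_n", "samples": [{"question": "...", "answer": "..."}]}
    # where `samples` may be a list of docs or a callable returning such a list.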
    @staticmethod
    def append_target_question(
        labeled_examples: List[Dict[str, str]],
        question: str,
        fewshot_as_multiturn: bool = False,
    ) -> None:
        """Adds a target question to the labeled examples list.
        If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry.
        Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant.
        """
        if not fewshot_as_multiturn:
            # if no messages or last message is system, append as new user entry
            if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system":
                labeled_examples.append({"role": "user", "content": question})
            # if last message is user, append to it to avoid two user messages in a row
            else:
                labeled_examples[-1]["content"] += question
        else:
            # if fewshot_as_multiturn is True, append as next user entry (last is always assistant)
            labeled_examples.append({"role": "user", "content": question})

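    # Sketch: given history [{"role": "system", "content": "..."}] and question
    # "Q:", append_target_question(history, "Q:") appends a new user entry
    # {"role": "user", "content": "Q:"}; if the last entry is already a user turn
    # (and fewshot_as_multiturn is False), "Q:" is concatenated onto its content.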
    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: str,
        num_fewshot: int,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        lm=None,
    ) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param system_instruction: str
            System instruction to be applied to the prompt.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param fewshot_as_multiturn: bool
            Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
        :param lm:
            Language model with definition of the tokenizer/function to use for applying the chat template.
        :returns: str
            The fewshot context.
        """

        if apply_chat_template:
            labeled_examples = []
        else:
            labeled_examples = ""

        # get task description
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        # create system prompt based on the provided system instruction and description
        if system_instruction is not None and description:
            system_prompt = (
                f"{system_instruction}{self.sampler.fewshot_delimiter}{description}"
            )
        elif system_instruction is not None:
            system_prompt = system_instruction
        elif description:
            system_prompt = description
        else:
            system_prompt = ""

        # add system prompt if specified
        if system_prompt:
            if apply_chat_template:
                labeled_examples.append({"role": "system", "content": system_prompt})
            else:
                labeled_examples = system_prompt

        # if few-shot - append examples after the system prompt
        if num_fewshot > 0:
            if apply_chat_template:
                labeled_examples.extend(
                    self.sampler.get_chat_context(
                        doc, num_fewshot, fewshot_as_multiturn
                    )
                )
            else:
                labeled_examples += self.sampler.get_context(doc, num_fewshot)

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if self.multiple_input:
                return lm.apply_chat_template(labeled_examples)
            if isinstance(example, str):
                self.append_target_question(
                    labeled_examples, example, fewshot_as_multiturn
                )
            # for loglikelihood create a list of questions with appended choices
            elif isinstance(example, list):
                labeled_examples_list = []
                # copy chat history for each example and append the answer
                for ex in example:
                    chat = deepcopy(labeled_examples)
                    self.append_target_question(chat, ex, fewshot_as_multiturn)
                    labeled_examples_list.append(lm.apply_chat_template(chat))
                return labeled_examples_list
            # if example is an integer, append the choice or convert to string
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    self.append_target_question(
                        labeled_examples, choices[example], fewshot_as_multiturn
                    )
                else:
                    self.append_target_question(
                        labeled_examples, str(example), fewshot_as_multiturn
                    )
            return lm.apply_chat_template(labeled_examples)
        else:
            if self.multiple_input:
                return labeled_examples
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)

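    # Sketch of the chat-format path above (hypothetical `lm`): with
    # apply_chat_template=True the context is accumulated as a message list, e.g.
    #   [{"role": "system", "content": "..."},
    #    {"role": "user", "content": "<fewshot examples + question>"}]
    # and finally rendered to a string by lm.apply_chat_template(...).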
    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances."""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"doc_to_text must be an int, str, or callable, not {type(doc_to_text)}"
            )

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
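        """Render the gold target for `doc`.

        Accepts the same kinds of specifications as `doc_to_text`; in
        addition, template strings that render to a digit are cast to int
        when `doc_to_choice` is set, and bracketed strings are parsed as
        Python lists where possible.
        """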
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected doc_to_target type: {type(doc_to_target)}")

    def doc_to_choice(self, doc: Any) -> List[str]:
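        """Return the list of answer choices for `doc`.

        `config.doc_to_choice` may be a dataset column name, a template
        string rendering to a Python list, a literal list, a dict (whose
        values are used), a callable, or a Promptsource-style template
        exposing `get_answer_choices_list`.
        """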
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            # raising here avoids an UnboundLocalError further down
            raise ValueError("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unexpected doc_to_choice type: {type(doc_to_choice)}")

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
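        """Build the LM request(s) for a single document.

        Returns one Instance for `loglikelihood`, `loglikelihood_rolling`,
        and `generate_until` tasks, and a list of per-choice loglikelihood
        Instances for `multiple_choice` tasks.
        """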
        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
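            # For illustration only (hypothetical doc): with ctx "Q: 2+2=\nA:"
            # and choices ["3", "4"], the default target_delimiter " " yields
            # loglikelihood argument pairs ("Q: 2+2=\nA:", " 3") and
            # ("Q: 2+2=\nA:", " 4").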

            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]
            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
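                # e.g. with hypothetical numbers: if log P(choice|ctx) = -2.0
                # and log P(choice) = -5.0, the mutual-info score for that
                # choice is -2.0 - (-5.0) = 3.0.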
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=("", "{}".format(choice)),
                            idx=i,
                            **kwargs,
                        )
                        for i, choice in enumerate(choices)
                    ]
                )
            return request_list

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

    def process_results(self, doc, results):
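        """Map the raw model `results` for one document to a dict of
        per-metric values, keyed by metric name, for later aggregation."""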
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
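            # Note: these entries are (loglikelihood, length) pairs; the
            # corpus-level scores are computed later by the configured
            # aggregation functions (e.g. weighted_perplexity, bits_per_byte).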
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Expected one unconditional loglikelihood per choice"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)
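            # e.g. with hypothetical values: lls = [-9.0, -12.0] for choices of
            # 3 and 12 characters give length-normalized scores [-3.0, -1.0],
            # so pred_norm selects the longer choice despite its lower raw ll.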

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    f"Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) != type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Any:
        return getattr(self.config, "task", None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
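    """Task whose docs supply a "choices" list and an integer "gold" index;
    answers are scored by comparing per-choice loglikelihoods."""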
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"
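    # Scores each document by the rolling loglikelihood of its full target
    # text under an empty context; all metrics are perplexity-style, so
    # lower is better (see higher_is_better below).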

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError(
                "Perplexity tasks expect an empty context, but received a non-empty `ctx`."
            )

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))
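        # e.g. count_bytes("naïve") == 6: "ï" encodes to two bytes in UTF-8.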

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
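        # e.g. count_words("hello  world") == 2, splitting on whitespace runs.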