import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger("lm-eval")


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    group: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[str] = (
        None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert that it is not the same split being evaluated (?)
    )
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_image: Optional[Union[Callable, str]] = None
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    metadata: Optional[dict] = (
        None  # not used by the code by default; allows users to pass arbitrary info to tasks
    )

    def __post_init__(self) -> None:
        if self.group is not None:
            eval_logger.warning(
                "A task YAML file was found to contain a `group` key. Groups which provide aggregate scores over several subtasks now require a separate config file--if you are not aggregating, you may want to use the `tag` config option instead. Setting `group` within a TaskConfig will be deprecated in v0.4.4. Please see https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md for more information."
            )

            if self.tag is None:
                self.tag = self.group
            else:
                raise ValueError(
                    "Got both a `group` and `tag` entry within a TaskConfig. Please use one or the other--`group` values will be deprecated in v0.4.4."
                )

        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # otherwise, ensure that we greedily generate in the absence of explicit arguments
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                }
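
    # Net effect (illustrative, given the default fewshot_delimiter "\n\n"): a
    # generate_until task with no explicit generation_kwargs defaults to greedy
    # decoding that stops at the few-shot delimiter, i.e.
    # {"until": ["\n\n"], "do_sample": False}.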

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.

        Used for dumping results alongside the full task configuration.

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict
    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
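
# Example of constructing a TaskConfig directly -- a minimal sketch; the field
# values here are hypothetical, and real configs are usually loaded from a
# task YAML file:
#
#     cfg = TaskConfig(
#         task="demo_task",
#         dataset_path="super_glue",
#         dataset_name="boolq",
#         test_split="validation",
#         doc_to_text="{{question}}?",
#         doc_to_target="{{label}}",
#         num_fewshot=0,
#     )
#     cfg.to_dict()  # printable dict with `None` fields dropped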



class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., question, answer)
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        self.fewshot_rnd: Optional[random.Random] = (
            None  # purposely induce errors in case of improper usage
        )

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            eval_logger.warning(
                f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                ", using test_docs as fewshot_docs but this is not recommended."
            )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with a document-specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_image(self, doc):
        pass

    def build_all_requests(
        self,
        *,
        limit=None,
        rank=None,
        world_size=None,
        cache_requests=False,
        rewrite_requests_cache=False,
        system_instruction=None,
        apply_chat_template=False,
        fewshot_as_multiturn=False,
        lm=None,
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
        cache_key += "-chat_template" if apply_chat_template else ""
        cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else ""
        cache_key += (
            f"-system_prompt_hash{utils.hash_string(system_instruction)}"
            if system_instruction is not None
            else ""
        )
        cache_key += f"-tokenizer{lm.tokenizer_name}" if apply_chat_template else ""

        cached_instances = load_from_cache(file_name=cache_key)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context  # TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
                system_instruction,
                apply_chat_template,
                fewshot_as_multiturn,
                lm,
            )

            # TODO: we should override self.config.repeats if doing greedy generation so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances
        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")
        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        TODO: update this docstring
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
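
    # Illustrative example (not part of the API): for doc = "hello world",
    # count_words(doc) == 2 and count_bytes(doc) == 11; these counts are the
    # weights that `weighted_perplexity` and `bits_per_byte` aggregate over
    # in loglikelihood_rolling tasks.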

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc,
        num_fewshot,
        rnd=None,
        description=None,
    ):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
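
    # Resulting context layout (sketch):
    #   description + "text1 target1\n\ntext2 target2\n\n" + query_text
    # with the final target left for the model to complete.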

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances
    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()
    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)
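
        # Minimal usage sketch (argument values hypothetical):
        #   task.set_config("num_fewshot", 5)  # overwrite a value
        #   task.set_config("generation_kwargs", {"until": ["\n"]}, update=True)  # merge into a dict value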

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )
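
    # Note: evaluation prefers the test split and falls back to the validation
    # split; the training split is never used as eval_docs.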

    def doc_iterator(
        self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
    ) -> Iterator[Tuple[int, Any]]:
        limit = int(limit) if limit else None
        doc_iterator = utils.create_iterator(
            enumerate(self.eval_docs),
            rank=int(rank),
            limit=limit,
            world_size=int(world_size),
        )
        return doc_iterator
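
    # Sharding sketch: with world_size=2, rank 0 yields (0, doc0), (2, doc2), ...
    # and rank 1 yields (1, doc1), (3, doc3), ...; `limit` bounds how many
    # documents are drawn (behavior assumed from utils.create_iterator).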

class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO: no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"Using default aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"Using default higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]
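
        # For reference, a `filter_list` entry parsed above is shaped roughly
        # like this in YAML (names and values hypothetical):
        #   - name: "strict-match"
        #     filter:
        #       - function: "regex"
        #         regex_pattern: "answer is (.*)"
        #       - function: "take_first"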

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # set with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or a callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test one doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return a list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if isinstance(test_target, int) and test_choice is not None:
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = choice[0].isspace()
                delimiter_has_whitespace = (
                    self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Neither target_delimiter "{self.config.target_delimiter}" nor target choice: "{choice}" have whitespace; ignore this if the language you are evaluating does not use whitespace'
                    )

    def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            **dataset_kwargs if dataset_kwargs is not None else {},
        )

    def has_training_docs(self) -> bool:
        return self.config.training_split is not None

    def has_validation_docs(self) -> bool:
        return self.config.validation_split is not None

    def has_test_docs(self) -> bool:
        return self.config.test_split is not None

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        elif (
            self.config.fewshot_config is not None
            and self.config.fewshot_config.get("samples", None) is not None
        ):
            if isinstance(self.config.fewshot_config["samples"], list):
                return self.config.fewshot_config["samples"]
            elif callable(self.config.fewshot_config["samples"]):
                return self.config.fewshot_config["samples"]()
            else:
                raise Exception(
                    "`fewshot_config['samples']` was incorrectly defined in the configuration. It should be either a list of sample dicts, or a function returning such a list."
                )
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()
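
    # For reference, `fewshot_config` may instead supply fixed samples directly;
    # a rough sketch of the YAML shape (field values hypothetical):
    #   fewshot_config:
    #     sampler: "default"
    #     samples:
    #       - {"question": "2+2?", "answer": "4"}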
    @staticmethod
    def append_target_question(
        labeled_examples: List[Dict[str, str]],
        question: str,
        fewshot_as_multiturn: bool = False,
    ) -> None:
        """Adds a target question to the labeled examples list.
        If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry.
        Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant.
        """
        if not fewshot_as_multiturn:
            # if no messages or last message is system, append as new user entry
            if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system":
                labeled_examples.append({"role": "user", "content": question})
            # if last message is user, append to it to avoid two user messages in a row
            else:
                labeled_examples[-1]["content"] += question
        else:
            # if fewshot_as_multiturn is True, append as next user entry (last is always assistant)
            labeled_examples.append({"role": "user", "content": question})
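
    # Illustrative sketch of the two behaviors (messages hypothetical):
    #   history = [{"role": "user", "content": "Q1? A1"}]
    #   append_target_question(history, "Q2?")
    #   # -> history[-1]["content"] == "Q1? A1Q2?" (merged into the last user turn)
    #   append_target_question(history, "Q2?", fewshot_as_multiturn=True)
    #   # -> a new {"role": "user", "content": "Q2?"} entry is appended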

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: str,
        num_fewshot: int,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        lm=None,
    ) -> str:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param system_instruction: str
            System instruction to be applied to the prompt.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param fewshot_as_multiturn: bool
            Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
        :param lm:
            Language model with definition of the tokenizer/function to use for applying the chat template.
        :returns: str
            The fewshot context.
        """

        if apply_chat_template:
            labeled_examples = []
        else:
            labeled_examples = ""

        # get task description
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        # create system prompt based on the provided system instruction and description
        if system_instruction is not None and description:
            system_prompt = (
                f"{system_instruction}{self.sampler.fewshot_delimiter}{description}"
            )
        elif system_instruction is not None:
            system_prompt = system_instruction
        elif description:
            system_prompt = description
        else:
            system_prompt = ""

        # add system prompt if specified
        if system_prompt:
            if apply_chat_template:
                labeled_examples.append({"role": "system", "content": system_prompt})
            else:
                labeled_examples = system_prompt

        # if few-shot, append examples after the system prompt
        if num_fewshot > 0:
            if apply_chat_template:
                labeled_examples.extend(
                    self.sampler.get_chat_context(
                        doc, num_fewshot, fewshot_as_multiturn
                    )
                )
            else:
                labeled_examples += self.sampler.get_context(doc, num_fewshot)

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if self.multiple_input:
                return lm.apply_chat_template(labeled_examples)
            if isinstance(example, str):
                self.append_target_question(
                    labeled_examples, example, fewshot_as_multiturn
                )
            # for loglikelihood create a list of questions with appended choices
            elif isinstance(example, list):
                labeled_examples_list = []
                # copy chat history for each example and append the answer
                for ex in example:
                    chat = deepcopy(labeled_examples)
                    self.append_target_question(chat, ex, fewshot_as_multiturn)
                    labeled_examples_list.append(lm.apply_chat_template(chat))
                return labeled_examples_list
            # if example is an integer, append the choice or convert to string
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    self.append_target_question(
                        labeled_examples, choices[example], fewshot_as_multiturn
                    )
                else:
                    self.append_target_question(
                        labeled_examples, str(example), fewshot_as_multiturn
                    )
            return lm.apply_chat_template(labeled_examples)
        else:
            if self.multiple_input:
                return labeled_examples
            if isinstance(example, str):
                return labeled_examples + example
            elif isinstance(example, list):
                return [labeled_examples + ex for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example]
                else:
                    return labeled_examples + str(example)

    def apply_filters(self):
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc):
        if self.prompt is not None:
            doc_to_text = self.prompt
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self.config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"Unsupported doc_to_text type: {type(doc_to_text)}; "
                "expected int, str, or callable."
            )
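    # A minimal sketch of the resolution above (doc and template are
    # illustrative): with doc = {"question": "2+2?"} and
    # doc_to_text = "Q: {{question}}\nA:", this returns "Q: 2+2?\nA:".
    # If the rendered string is all digits and doc_to_choice is set, it is
    # parsed into an int and treated as an index into the choice list.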

    def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self.config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"Unsupported doc_to_target type: {type(doc_to_target)}; "
                "expected int, str, list, or callable."
            )
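    # Illustrative example (not from a real task): a template rendering to
    # "[1, 2]" is parsed into the list [1, 2] by the bracket branch above,
    # while a render like "[not a literal]" fails ast.literal_eval and falls
    # back to the raw string.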

    def doc_to_choice(self, doc: Any) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif self.config.doc_to_choice is None:
            raise ValueError("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(f"Unsupported doc_to_choice type: {type(doc_to_choice)}")
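    # A minimal sketch of the accepted forms (field and values illustrative):
    #   doc_to_choice = "choices"                -> doc["choices"]
    #   doc_to_choice = "{{options}}"            -> literal_eval of the render
    #   doc_to_choice = ["yes", "no"]            -> used as-is
    #   doc_to_choice = {"A": "yes", "B": "no"}  -> ["yes", "no"] (dict values)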

    def doc_to_image(self, doc: Any) -> Union[int, str, list, None]:
        if self.config.doc_to_image is None:
            eval_logger.error("doc_to_image was called but not set in config")
            return None
        else:
            doc_to_image = self.config.doc_to_image

        if isinstance(doc_to_image, str):
            if doc_to_image in self.features:
                return doc[doc_to_image]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_image, doc))
        elif callable(doc_to_image):
            return doc_to_image(doc)
        else:
            return None
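    # Minimal sketch (the field name "image" is illustrative): with
    # doc_to_image = "image" this returns doc["image"]; with a callable, its
    # return value becomes the `visual` argument assembled in
    # construct_requests below.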

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        aux_arguments = None

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                cont = self.doc_to_target(doc)
                arguments = [
                    (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                aux_arguments = [("", f"{choice}") for choice in choices]

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        multimodal_arg = {}
        if self.config.doc_to_image:
            multimodal_arg = {
                **multimodal_arg,
                **{"visual": self.doc_to_image(doc)},
            }

        if bool(multimodal_arg):
            if isinstance(arguments, list):
                arguments = [arg + (multimodal_arg,) for arg in arguments]
            else:
                arguments = arguments + (multimodal_arg,)

        if isinstance(arguments, list):
            if aux_arguments is not None:
                all_arg_list = [arguments, aux_arguments]
            else:
                all_arg_list = [arguments]
            request_list = []
            for arg_list in all_arg_list:
                request_list.extend(
                    [
                        Instance(
                            request_type="loglikelihood",
                            doc=doc,
                            arguments=arg,
                            idx=i,
                            **kwargs,
                        )
                        for i, arg in enumerate(arg_list)
                    ]
                )

            return request_list

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=arguments,
            idx=0,
            **kwargs,
        )
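    # Worked example of the mutual-information normalization described in the
    # comments above (numbers are made up): with conditional
    # log P(choice|ctx) = [-1.2, -2.0] and unconditional
    # log P(choice) = [-3.0, -2.1], the normalized scores are
    # [-1.2 - (-3.0), -2.0 - (-2.1)] = [1.8, 0.1], so choice 0 wins; the
    # unconditional lls come from the extra ("", choice) requests built here.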

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError(
                        "Expected one unconditional loglikelihood per choice"
                    )
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info
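            # Worked example of the length normalization above (values made
            # up): with lls = (-6.0, -9.0) and choices ["no", "absolutely"] of
            # lengths (2, 10), pred = argmax(lls) = 0, while pred_norm compares
            # (-6.0 / 2, -9.0 / 10) = (-3.0, -0.9) and selects choice 1 instead.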

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            elif type(gold) is not type(result):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except TypeError:
                                # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:
                        # needed for now to bridge the different interfaces of our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until', or 'multiple_choice'."
            )

        return result_dict

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Any:
        return getattr(self.config, "task", None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError(
                "Perplexity tasks expect an empty context; got a non-empty `ctx`."
            )

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }
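    # Illustrative arithmetic for the aggregations above (numbers made up;
    # formulas per the metrics imported at the top of this file): with a summed
    # loglikelihood of -1386.3 over 1000 words,
    #   word_perplexity ~= exp(1386.3 / 1000) ~= 4.0
    # and over 4000 bytes,
    #   bits_per_byte ~= 1386.3 / (4000 * ln 2) ~= 0.5.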

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
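    # Quick sanity checks for the two counters above: count_bytes("héllo") == 6
    # because "é" is two bytes in UTF-8, and count_words("The  sky is\nblue")
    # == 4 because the regex treats any whitespace run as one boundary.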