import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger(__name__)


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    custom_dataset: Optional[Callable] = None
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[str] = (
        None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert that this is not the same split being evaluated (?)
    )
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_image: Optional[Union[Callable, str]] = None
    doc_to_audio: Optional[Union[Callable, str]] = None
    unsafe_code: bool = False
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    gen_prefix: Optional[str] = None
    metadata: Optional[dict] = (
        None  # by default, not used in the code. allows for users to pass arbitrary info to tasks
    )
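
    # Illustrative example (not used at runtime): a minimal generation task could be
    # configured roughly as
    #   TaskConfig(task="my_task", dataset_path="my/dataset", test_split="test",
    #              doc_to_text="{{question}}", doc_to_target="{{answer}}",
    #              output_type="generate_until", metric_list=[{"metric": "exact_match"}])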

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
Lintang Sutawika's avatar
Lintang Sutawika committed
117
        else:
118
            if self.output_type == "generate_until":
Lintang Sutawika's avatar
Lintang Sutawika committed
119
120
                # ensure that we greedily generate in absence of explicit arguments otherwise
                self.generation_kwargs = {
121
122
123
124
125
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
Lintang Sutawika's avatar
Lintang Sutawika committed
126
127
                    "do_sample": False,
                }
128

129
130
131
    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """dumps the current config as a dictionary object, as a printable format.
        null fields will not be printed.
haileyschoelkopf's avatar
haileyschoelkopf committed
138
        Used for dumping results alongside full task configuration
139

haileyschoelkopf's avatar
haileyschoelkopf committed
140
141
142
143
144
145
146
147
148
149
        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict
    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)


class Task(abc.ABC):
    """A task represents an entire benchmark, including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...} or
        {"question": ..., "choices": [...], "answer": ...}
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[random.Random] = (
            None  # purposely induce errors in case of improper usage
        )

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            if self.config.get("num_fewshot", 0) > 0:
                eval_logger.warning(
                    f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                    ", using test_docs as fewshot_docs but this is not recommended."
                )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    # not an abstractmethod because not every language-only task has to implement this
    def doc_to_image(self, doc):
        raise NotImplementedError

    def doc_to_audio(self, doc):
        raise NotImplementedError

    def doc_to_prefix(self, doc):
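        """Returns a string used to prefill the assistant/generation turn; empty by default."""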
        return ""

    def build_all_requests(
        self,
        *,
        limit: Union[int, None] = None,
        samples: Optional[List[int]] = None,
        rank: int = 0,
        world_size: int = 1,
        cache_requests: bool = False,
        rewrite_requests_cache: bool = False,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        tokenizer_name: str = "",
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
        cache_key += "-chat_template" if apply_chat_template else ""
        cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else ""
        cache_key += (
            f"-system_prompt_hash{utils.hash_string(system_instruction)}"
            if system_instruction is not None
            else ""
        )
        cache_key += f"-tokenizer{tokenizer_name}"

        cached_instances = load_from_cache(file_name=cache_key, cache=cache_requests)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")
        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(
                rank=rank, limit=limit, samples=samples, world_size=world_size
            )
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                0 if self.config.num_fewshot is None else self.config.num_fewshot,
                system_instruction,
                apply_chat_template,
                fewshot_as_multiturn,
                chat_template,
                gen_prefix=self.doc_to_prefix(doc),
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
                apply_chat_template=apply_chat_template,
                chat_template=chat_template,
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances
        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")
        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @abc.abstractmethod
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @abc.abstractmethod
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None, description=None, **kwargs):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        (
            self._metric_fn_list,
            self._aggregation_list,
            self._metric_fn_kwargs,
            self._higher_is_better,
        ) = ({}, {}, {}, {})
        self._metric_fn_list[metric_name] = get_metric(metric_name)
        self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
        self._higher_is_better[metric_name] = is_higher_better(metric_name)
        self._metric_fn_kwargs[metric_name] = {}
        if not isinstance(self, ConfigurableTask):
            self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
            self.aggregation = lambda: {
                metric_name: get_metric_aggregation(metric_name)
            }
        setattr(self._config, "metric_list", [{"metric": metric_name}])
        setattr(self._config, "process_results", None)

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self,
        *,
        rank: int = 0,
        limit: Union[int, None] = None,
        world_size: int = 1,
        samples: Optional[List[int]] = None,
    ) -> Iterator[Tuple[int, Any]]:
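        """Yields (doc_id, doc) pairs from `self.eval_docs` for this rank.

        If `samples` is given, only those indices are yielded; otherwise the first
        `limit` docs (or all docs when `limit` is None) are used, sharded across
        `world_size` ranks.
        """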
        if samples:
            n = len(self.eval_docs)
            assert all([e < n for e in samples]), (
                f"Elements of --samples should be in the interval [0,k-1] where k is the number of total examples. In this case, k={n}."
            )
            eval_logger.info(
                f"{self.config.task}: Evaluating on {len(samples)} examples"
            )
            doc_iterator = utils.create_iterator(
                enumerate(x for i, x in enumerate(self.eval_docs) if i in samples),
                rank=int(rank),
                limit=None,  # limit does not matter here since we are selecting samples directly
                world_size=int(world_size),
            )
        else:
            limit = int(limit) if limit else None
            doc_iterator = utils.create_iterator(
                enumerate(self.eval_docs),
                rank=int(rank),
                limit=limit,
                world_size=int(world_size),
            )
        return doc_iterator


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:  # TODO no super() call here
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )
        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.doc_to_image is not None:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.doc_to_audio:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.unsafe_code is not False:
            self.UNSAFE_CODE = True

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path
        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name
        self._metric_fn_list = {}
        self._metric_fn_kwargs = {}
        self._aggregation_list = {}
        self._higher_is_better = {}

        if self.config.metric_list is None:
            # TODO: handle this in TaskConfig.__post_init__ ?
            _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]

            for metric_name in _metric_list:
                self._metric_fn_list[metric_name] = get_metric(metric_name)
                self._metric_fn_kwargs[metric_name] = {}
                self._aggregation_list[metric_name] = get_metric_aggregation(
                    metric_name
                )
                self._higher_is_better[metric_name] = is_higher_better(metric_name)
        else:
            for metric_config in self.config.metric_list:
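                # each entry is a dict such as (illustrative):
                #   {"metric": "exact_match", "aggregation": "mean", "higher_is_better": True}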
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                hf_evaluate_metric = (
                    "hf_evaluate" in metric_config
                    and metric_config["hf_evaluate"] is True
                )

                if self.config.process_results is not None:
                    self._metric_fn_list[metric_name] = None
                    self._metric_fn_kwargs[metric_name] = {}
                elif callable(metric_name):
                    metric_fn = metric_name.__call__
                    metric_name = metric_name.__name__
                    self._metric_fn_list[metric_name] = metric_fn
                    self._metric_fn_kwargs[metric_name] = kwargs
                else:
                    self._metric_fn_list[metric_name] = get_metric(
                        metric_name, hf_evaluate_metric
                    )
                    self._metric_fn_kwargs[metric_name] = kwargs

                if "aggregation" in metric_config:
                    agg_name = metric_config["aggregation"]
                    if isinstance(agg_name, str):
                        self._aggregation_list[metric_name] = get_aggregation(agg_name)
                    elif callable(agg_name):  # noqa: E721
                        self._aggregation_list[metric_name] = metric_config[
                            "aggregation"
                        ]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    metric_agg = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
                    )
                    self._aggregation_list[metric_name] = metric_agg

                if "higher_is_better" in metric_config:
                    self._higher_is_better[metric_name] = metric_config[
                        "higher_is_better"
                    ]
                else:
                    eval_logger.warning(
                        f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    self._higher_is_better[metric_name] = is_higher_better(metric_name)

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
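                # each entry is a dict such as (illustrative):
                #   {"name": "my_filter", "filter": [{"function": "take_first"}]}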
                filter_name = filter_config["name"]
                filter_functions = filter_config["filter"]
                components = []
                for function in filter_functions:
                    kwargs = {
                        key: function[key] for key in function if key != "function"
                    }
                    components.append([function["function"], kwargs])
                filter_pipeline = build_filter_ensemble(filter_name, components)
                self._filters.append(filter_pipeline)
        else:
            # TODO: handle repeats in a more general way rather than just discarding
            eval_logger.debug(
                "No custom filters defined. Using default 'take_first' filter for handling repeats."
            )
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)
            if isinstance(test_text, int):
                self.multiple_input = num_choice
        else:
            test_choice = None
        if isinstance(test_target, list):
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)
        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = True if choice[0].isspace() else False
                delimiter_has_whitespace = (
                    True
                    if self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                    else False
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(
        self, dataset_kwargs: Optional[Dict[str, Any]] = None, **kwargs
    ) -> None:
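        """Loads the dataset from a user-supplied `custom_dataset` callable if configured, otherwise via `datasets.load_dataset`."""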
        if isinstance(self.config.custom_dataset, Callable):
            eval_logger.warning(
                f"{self.config.task}: Custom kwargs can be passed to `--metadata` in console (as json string) or to the TaskManager."
                + "\nFor example --metadata='{\"max_seq_lengths\":[4096, 8192]}'. For details see task Readme."
            )
            self.dataset = self.config.custom_dataset(
                **(self.config.metadata or {}), **(self.config.dataset_kwargs or {})
            )
        else:
            self.dataset = datasets.load_dataset(
                path=self.DATASET_PATH,
                name=self.DATASET_NAME,
                **dataset_kwargs if dataset_kwargs is not None else {},
            )
    def has_training_docs(self) -> bool:
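        """Whether a training split is configured for this task."""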
        if self.config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self) -> bool:
        if self.config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self) -> bool:
        if self.config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]
    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]
    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]
    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        elif (
            self.config.fewshot_config is not None
            and self.config.fewshot_config.get("samples", None) is not None
        ):
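            # `samples` may be given inline in the config (illustrative):
            #   fewshot_config: {"samples": [{"question": "...", "answer": "..."}]}
            # or as a callable that returns such a list.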
            if isinstance(self.config.fewshot_config["samples"], list):
                return self.config.fewshot_config["samples"]
            elif callable(self.config.fewshot_config["samples"]):
                return self.config.fewshot_config["samples"]()
            else:
                raise Exception(
                    "`fewshot_config['samples']` was incorrectly defined in the configuration. It should be either a list of sample dicts or a function returning such a list."
                )
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "using preconfigured rule."
                )
            return super().fewshot_docs()
    @staticmethod
    def append_target_question(
        labeled_examples: List[Dict[str, str]],
        question: str,
        fewshot_as_multiturn: bool = False,
        gen_prefix: Optional[str] = None,
    ) -> None:
        """Adds a target question to the labeled examples list.
        If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry.
        Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant.
        """
        if not fewshot_as_multiturn:
            # if no messages or last message is system, append as new user entry
            if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system":
                labeled_examples.append({"role": "user", "content": question})
            # if last message is user, append to it to avoid two user messages in a row
            else:
                labeled_examples[-1]["content"] += question
        else:
            # if fewshot_as_multiturn is True, append as next user entry (last is always assistant)
            labeled_examples.append({"role": "user", "content": question})
        if gen_prefix:
            labeled_examples.append({"role": "assistant", "content": gen_prefix})

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: dict,
        num_fewshot: int,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        gen_prefix: Optional[str] = None,
    ) -> Union[str, List[str]]:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param  system_instruction: str
            System instruction to be applied to the prompt.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param fewshot_as_multiturn: bool
            Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
        :param chat_template:
            callable (from lm.apply_chat_template) that takes in a list[Dict] chat transcript and renders it into a string.
        :param gen_prefix:
            String to append after the <|assistant|> token.
        :returns: str
            The fewshot context.
        """
        if apply_chat_template:
            labeled_examples = []
        else:
            labeled_examples = ""

        # get task description
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        # create system prompt based on the provided system instruction and description
        if system_instruction is not None and description:
            system_prompt = (
                f"{system_instruction}{self.sampler.fewshot_delimiter}{description}"
            )
        elif system_instruction is not None:
            system_prompt = system_instruction
        elif description:
            system_prompt = description
        else:
            system_prompt = ""

        # add system prompt if specified
        if system_prompt:
            if apply_chat_template:
                labeled_examples.append({"role": "system", "content": system_prompt})
            else:
                labeled_examples = system_prompt
        # if few-shot - append examples after the system prompt
        if num_fewshot > 0:
            if apply_chat_template:
                labeled_examples.extend(
                    self.sampler.get_chat_context(
                        doc,
                        num_fewshot,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                )
            else:
                labeled_examples += self.sampler.get_context(
                    doc, num_fewshot, gen_prefix=gen_prefix
                )

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if self.multiple_input:
                # TODO: append prefill?
                if not labeled_examples:
                    return ""
                return chat_template(labeled_examples)
            if isinstance(example, str):
                self.append_target_question(
                    labeled_examples,
                    example,
                    fewshot_as_multiturn,
                    gen_prefix=gen_prefix,
                )
            # for loglikelihood create a list of questions with appended choices
            elif isinstance(example, list):
                labeled_examples_list = []
                # copy chat history for each example and append the answer
                for ex in example:
                    chat = deepcopy(labeled_examples)
                    self.append_target_question(
                        chat,
                        ex,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                    # TODO: append prefill?
                    labeled_examples_list.append(
                        chat_template(
                            chat,
                            add_generation_prompt=False if gen_prefix else True,
                        )
                    )
                return labeled_examples_list
            # if example is an integer, append the choice or convert to string
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    self.append_target_question(
                        labeled_examples,
                        choices[example],
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                else:
                    self.append_target_question(
                        labeled_examples,
                        str(example),
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                # return lm.apply_chat_template(labeled_examples)
            return chat_template(
                labeled_examples,
                add_generation_prompt=False if gen_prefix else True,
            )
        else:
            prefix = (
                self.config.target_delimiter + gen_prefix
                if gen_prefix is not None
                else ""
            )
            if self.multiple_input:
                return labeled_examples
            if isinstance(example, str):
                return labeled_examples + example + prefix
            elif isinstance(example, list):
                return [labeled_examples + ex + prefix for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example] + prefix
                else:
                    return labeled_examples + str(example) + prefix

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc: dict):
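        """Return the string used for decontamination overlap checks on `doc`."""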
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc, doc_to_text=None):
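        """Render the input (question) text for `doc`.

        Resolution order: a Promptsource prompt set on the task, the
        `doc_to_text` override argument, then `self.config.doc_to_text`. The
        resolved value may be an index, a column name, a template string, a
        callable, or a prompt object with an `apply` method.
        """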
        if self.prompt is not None:
            doc_to_text = self.prompt
        elif doc_to_text is not None:
            doc_to_text = doc_to_text
        else:
            doc_to_text = self.config.doc_to_text

        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(f"Unexpected type for doc_to_text: {type(doc_to_text)}")

    def doc_to_target(self, doc: Mapping, doc_to_target=None) -> Union[int, str, list]:
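        """Return the gold target for `doc`, resolved like `doc_to_text`.

        Depending on the configuration this may be an index into the choices,
        a string (possibly parsed into a list), a list, or the output of a
        callable / prompt object.
        """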
        if self.prompt is not None:
            doc_to_target = self.prompt
        elif doc_to_target is not None:
            doc_to_target = doc_to_target
        else:
            doc_to_target = self.config.doc_to_target

        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self._config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError

    def doc_to_choice(self, doc: Any, doc_to_choice=None) -> List[str]:
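        """Return the list of answer choices for `doc`.

        The configured value may be a column name, a template string, a list,
        a dict (whose values are used), a callable, or a prompt object.
        """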
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif doc_to_choice is not None:
            doc_to_choice = doc_to_choice
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError

    def doc_to_image(self, doc: Any, doc_to_image=None) -> Union[int, str, list]:
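        """Return the visual input(s) for `doc`, or None for text-only tasks."""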
        if doc_to_image is not None:
            doc_to_image = doc_to_image
        elif self.config.doc_to_image is not None:
            doc_to_image = self.config.doc_to_image
        else:
            return None

        if isinstance(doc_to_image, list):
            image_feature = [
                self.doc_to_image(doc, feature) for feature in doc_to_image
            ]
            return [feature for feature in image_feature if feature is not None]
        elif isinstance(doc_to_image, str):
            if doc_to_image in self.features:
                return doc[doc_to_image]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_image, doc))
        elif callable(doc_to_image):
            return doc_to_image(doc)
        else:
            return None

    def doc_to_audio(self, doc: Any, doc_to_audio=None) -> Union[int, str, list]:
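        """Return the audio input(s) for `doc`, or None for text-only tasks."""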
        if doc_to_audio is not None:
            doc_to_audio = doc_to_audio
        elif self.config.doc_to_audio is not None:
            doc_to_audio = self.config.doc_to_audio
        else:
            return None

        if isinstance(doc_to_audio, list):
            audio_feature = [
                self.doc_to_audio(doc, feature) for feature in doc_to_audio
            ]
            return [feature for feature in audio_feature if feature is not None]
        elif isinstance(doc_to_audio, str):
            if doc_to_audio in self.features:
                return doc[doc_to_audio]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_audio, doc))
        elif callable(doc_to_audio):
            return doc_to_audio(doc)
        else:
            return None

    def doc_to_prefix(self, doc):
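        """Return the generation prefix for `doc` when `gen_prefix` is configured, else None."""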
        if (gen_prefix := self.config.gen_prefix) is not None:
            if gen_prefix in self.features:
                return doc[gen_prefix]
            else:
                return utils.apply_template(gen_prefix, doc)
        return None

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
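        """Build the request Instance(s) for a single `doc` and its context `ctx`.

        Multiple-choice tasks produce one loglikelihood Instance per choice
        (plus unconditional requests when `acc_mutual_info` is requested);
        other output types produce a single Instance. Visual/audio arguments
        are appended for multimodal tasks.
        """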
        apply_chat_template = kwargs.pop("apply_chat_template", False)
        chat_template: Callable | None = kwargs.pop("chat_template", None)

        aux_arguments = None

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if apply_chat_template:
                target_delimiter = ""
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                # apply chat_template to choices if apply_chat_template
                cont = self.doc_to_target(doc)

                arguments = [
                    (
                        ctx
                        + (
                            chat_template([{"role": "user", "content": choice}])
                            if apply_chat_template
                            else choice
                        ),
                        f"{target_delimiter}{cont}",
                    )
                    for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in self._metric_fn_list.keys():
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.
                aux_arguments = [("", f"{choice}") for choice in choices]

                arguments.extend(aux_arguments)

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        multimodal_arg = {}
        if (
            self.config.doc_to_image
        ):  # TODO: ensure that non-multimodal tasks aren't getting visual args
            multimodal_arg = {
                **multimodal_arg,
                **{"visual": self.doc_to_image(doc)},
            }

        if (
            self.config.doc_to_audio
        ):  # TODO: ensure that non-multimodal tasks aren't getting audio args
            multimodal_arg = {
                **multimodal_arg,
                **{"audio": self.doc_to_audio(doc)},
            }

        if bool(multimodal_arg):
            if isinstance(arguments, list):
                arguments = [arg + (multimodal_arg,) for arg in arguments]
            else:
                arguments = arguments + (multimodal_arg,)

        if self.OUTPUT_TYPE == "multiple_choice":
            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]

            return request_list

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=arguments,
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = list(self._metric_fn_list.keys())
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if (
                2 * len(choices) == len(lls)
                and "acc_mutual_info" in self._metric_fn_list.keys()
            ):
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                lls_unconditional = lls[1::2]
                if len(lls_unconditional) != len(choices):
                    raise ValueError
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[::2]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    f"Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            # TODO: handle this better
            elif type(gold) is not type(result) and not (
                "bypass" in self._metric_fn_list.keys() or isinstance(result, list)
            ):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self._metric_fn_list.keys():
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        # print(gold)
                        gold = [gold]
                    if metric == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = self._metric_fn_list[metric](
                            references=gold,
                            predictions=result,
                            **self._metric_fn_kwargs[metric],
                        )[metric]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = self._metric_fn_list[metric](
                                    references=[gold_option],
                                    predictions=[result],
                                    **self._metric_fn_kwargs[metric],
                                )
                            except (
                                TypeError
                            ):  # TODO: this is hacky and I don't want to do it
                                result_score = self._metric_fn_list[metric](
                                    [gold_option, result]
                                )
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = self._metric_fn_list[metric]([gold, result])
                if isinstance(result_score, dict):
                    # TODO: this handles the case where HF evaluate returns a dict.
                    # This allows for multiple metrics to be returned from the same function
                    for k, v in result_score.items():
                        result_dict[k] = v
                else:
                    result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'",
            )

        return result_dict

    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Any:
        return getattr(self.config, "task", None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
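    """Base class for multiple-choice tasks defined in Python.

    Documents are expected to provide "choices" and "gold" keys; each choice
    is scored with a loglikelihood request, and accuracy plus
    length-normalized accuracy are reported.
    """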
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
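    """Task for rolling-loglikelihood (perplexity-style) evaluation.

    Few-shot examples are not supported; each document is scored as a whole,
    and word_perplexity, byte_perplexity, and bits_per_byte are reported.
    """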
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))