import abc
import ast
import logging
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import cached_property
from inspect import getsource
from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import datasets
import numpy as np
from tqdm import tqdm
from typing_extensions import deprecated

from lm_eval import utils
from lm_eval.api import samplers
from lm_eval.api.instance import Instance, OutputType
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
from lm_eval.api.registry import (
    AGGREGATION_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
    get_aggregation,
    get_metric,
    get_metric_aggregation,
    is_higher_better,
)
from lm_eval.caching.cache import load_from_cache, save_to_cache
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt


ALL_OUTPUT_TYPES = [
    "loglikelihood",
    "multiple_choice",
    "loglikelihood_rolling",
    "generate_until",
]

eval_logger = logging.getLogger(__name__)


@dataclass
class MetricConfig:
    """Encapsulates information about a single metric."""

    name: str
    fn: Optional[Callable] = None
    kwargs: Optional[dict] = None
    aggregation_fn: Optional[Callable] = None
    higher_is_better: Optional[bool] = True
    hf_evaluate: bool = False

    @cached_property
    def metric_names(self) -> str:
        return self.name

    @cached_property
    def aggregation(self) -> Callable:
        if self.aggregation_fn is None:
            return get_metric_aggregation(self.name)
        return self.aggregation_fn

    @cached_property
    def _higher_is_better(self) -> bool:
        if self.higher_is_better is None:
            return is_higher_better(self.name)
        return self.higher_is_better
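
# Illustrative usage (a sketch, not executed by the module): when only a name
# is given, MetricConfig resolves its aggregation and direction lazily from
# the metric registries imported above. Assumes "exact_match" is registered.
#
#     m = MetricConfig(name="exact_match")
#     m.aggregation        # falls back to the registry default for the metric
#     m._higher_is_better  # falls back to is_higher_better("exact_match")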


@dataclass
class FilterConfig:
    """Encapsulates information about a single filter."""

    name: str
    fn: Optional[Callable] = None
    kwargs: Optional[dict] = None


@dataclass
class TaskConfig(dict):
    # task naming/registry
    task: Optional[str] = None
    task_alias: Optional[str] = None
    tag: Optional[Union[str, list]] = None
    # HF dataset options.
    # which dataset to use,
    # and what splits for what purpose
    custom_dataset: Optional[Callable] = None
    dataset_path: Optional[str] = None
    dataset_name: Optional[str] = None
    dataset_kwargs: Optional[dict] = None
    training_split: Optional[str] = None
    validation_split: Optional[str] = None
    test_split: Optional[str] = None
    fewshot_split: Optional[str] = (
        None  # TODO: assert that this is not None if num_fewshot > 0 (?); assert it is not the same split as the one being evaluated (?)
    )
    # formatting / prompting options.
    # see docs/advanced_task_guide.md for more info
    process_docs: Optional[Callable] = None
    doc_to_text: Optional[Union[Callable, str]] = None
    doc_to_target: Optional[Union[Callable, str]] = None
    doc_to_image: Optional[Union[Callable, str]] = None
    doc_to_audio: Optional[Union[Callable, str]] = None
    unsafe_code: bool = False
    doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
    process_results: Optional[Union[Callable, str]] = None
    use_prompt: Optional[str] = None
    description: str = ""
    target_delimiter: str = " "
    fewshot_delimiter: str = "\n\n"
    fewshot_config: Optional[dict] = None
    # runtime configuration options
    num_fewshot: Optional[int] = None
    # scoring options
    metric_list: Optional[list] = None
    output_type: OutputType = "generate_until"
    generation_kwargs: Optional[dict] = None
    repeats: int = 1
    filter_list: Optional[Union[str, list]] = None
    should_decontaminate: bool = False
    doc_to_decontamination_query: Optional[str] = None
    gen_prefix: Optional[str] = None
    metadata: Optional[dict] = (
        None  # not used by the code by default; lets users pass arbitrary info to tasks
    )
    _metric_list = None
    _filter_list = None

    def __post_init__(self) -> None:
        if self.generation_kwargs is not None:
            if self.output_type != "generate_until":
                eval_logger.warning(
                    f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
                )

            if "temperature" in self.generation_kwargs:
                self.generation_kwargs["temperature"] = float(
                    self.generation_kwargs["temperature"]
                )

            if "until" not in self.generation_kwargs:
                eval_logger.warning(
                    f"{self.task}: No `until` specified in `generation_kwargs`! Defaulting to the fewshot_delimiter={repr(self.fewshot_delimiter)}"
                )
                self.generation_kwargs["until"] = [self.fewshot_delimiter]
        else:
            if self.output_type == "generate_until":
                # otherwise, ensure that we generate greedily in the absence of explicit arguments
                self.generation_kwargs = {
                    "until": (
                        None
                        if self.fewshot_delimiter is None
                        else [self.fewshot_delimiter]
                    ),
                    "do_sample": False,
                    "temperature": 0,
                }
                eval_logger.warning(
                    f"{self.task}: No `generation_kwargs` specified in task config, defaulting to {self.generation_kwargs}"
                )

        if self.metric_list is not None:
            for metric_config in self.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )

    def get_metrics(self) -> list["MetricConfig"]:
        metrics = []
        if self.metric_list is None:
            _metric_list = DEFAULT_METRIC_REGISTRY[self.output_type]
            metrics.extend(
                MetricConfig(
                    name=metric_name,
                    fn=get_metric(metric_name),
                    aggregation_fn=get_metric_aggregation(metric_name),
                    higher_is_better=is_higher_better(metric_name),
                )
                for metric_name in _metric_list
            )
        else:
            for metric_config in self.metric_list:
                if "metric" not in metric_config:
                    raise ValueError(
                        "'metric' key not provided for an entry in 'metric_list', must be specified!"
                    )
                metric_name = metric_config["metric"]
                _metric_fn_kwargs = {
                    key: metric_config[key]
                    for key in metric_config
                    if key
                    not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
                }
                _hf_evaluate_metric: bool = metric_config.get("hf_evaluate", False)
                _metric_fn = None
                _aggregation = None

                if self.process_results is not None:
                    # User computes metrics inside `process_results()`;
                    # keep the name for result keys but attach no metric fn.
                    _metric_name = metric_name
                    _metric_fn_kwargs = {}
                elif callable(metric_name):
                    # User passed a function object
                    _metric_name = metric_name.__name__
                    _metric_fn = metric_name.__call__
                else:
                    # Normal case: look up the metric function by name
                    _metric_name = metric_name
                    _metric_fn = get_metric(metric_name, _hf_evaluate_metric)

                # ---------- Decide how to aggregate examples ----------
                if "aggregation" in metric_config:
                    if isinstance(_agg_name := metric_config["aggregation"], str):
                        _aggregation = get_aggregation(_agg_name)
                    elif callable(_agg_name):  # noqa: E721
                        _aggregation = metric_config["aggregation"]
                else:
                    INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
                    _aggregation = get_metric_aggregation(metric_name)
                    eval_logger.warning(
                        f"[Task: {self.task}] metric {metric_name} is defined, but aggregation is not. "
                        f"using default "
                        f"aggregation={INV_AGG_REGISTRY[_aggregation]}"
                    )

                # ---------- Determine "higher-is-better" semantics ----------
                if "higher_is_better" in metric_config:
                    _higher_is_better = metric_config["higher_is_better"]
                else:
                    eval_logger.warning(
                        f"[Task: {self.task}] metric {metric_name} is defined, but higher_is_better is not. "
                        f"using default "
                        f"higher_is_better={is_higher_better(metric_name)}"
                    )
                    _higher_is_better = is_higher_better(metric_name)

                metrics.append(
                    MetricConfig(
                        name=_metric_name,
                        fn=_metric_fn,
                        kwargs=_metric_fn_kwargs,
                        aggregation_fn=_aggregation,
                        higher_is_better=_higher_is_better,
                        hf_evaluate=_hf_evaluate_metric,
                    )
                )
        return metrics
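
    # Illustrative only: a typical `metric_list` entry consumed by
    # `get_metrics()` above. Keys other than "metric", "aggregation",
    # "higher_is_better" and "hf_evaluate" are forwarded to the metric
    # function as kwargs ("ignore_case" is a hypothetical example).
    #
    #     metric_list = [
    #         {
    #             "metric": "exact_match",
    #             "aggregation": "mean",
    #             "higher_is_better": True,
    #             "ignore_case": True,
    #         }
    #     ]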

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, item, value):
        return setattr(self, item, value)

    def to_dict(self, keep_callable: bool = False) -> dict:
        """Dumps the current config as a dictionary object, in a printable format.
        Null fields will not be printed.
        Used for dumping results alongside full task configuration

        :return: dict
            A printable dictionary version of the TaskConfig object.

        # TODO: should any default value in the TaskConfig not be printed?
        """
        cfg_dict = asdict(self)
        # remove values that are `None`
        for k, v in list(cfg_dict.items()):
            if v is None:
                cfg_dict.pop(k)
            elif k == "metric_list":
                for metric_dict in v:
                    for metric_key, metric_value in metric_dict.items():
                        if callable(metric_value):
                            metric_dict[metric_key] = self.serialize_function(
                                metric_value, keep_callable=keep_callable
                            )
                cfg_dict[k] = v
            elif callable(v):
                cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
        return cfg_dict

    def serialize_function(
        self, value: Union[Callable, str], keep_callable=False
    ) -> Union[Callable, str]:
        """Serializes a given function or string.

        If 'keep_callable' is True, the original callable is returned.
        Otherwise, attempts to return the source code of the callable using 'getsource'.
        """
        if keep_callable:
            return value
        else:
            try:
                return getsource(value)
            except (TypeError, OSError):
                return str(value)
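

# Example (an illustrative sketch, not executed): a minimal TaskConfig for a
# generative QA task over a Hugging Face dataset. The dataset path and field
# names below are placeholders, not a real dataset.
#
#     cfg = TaskConfig(
#         task="my_qa_task",
#         dataset_path="org/my_dataset",
#         test_split="test",
#         output_type="generate_until",
#         doc_to_text="Q: {{question}}\nA:",
#         doc_to_target="{{answer}}",
#         metric_list=[{"metric": "exact_match"}],
#     )
#
# Since no `generation_kwargs` are given, `__post_init__` fills in greedy
# decoding defaults and sets `until` to the fewshot delimiter.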

313
314
315
316
317
318
319
320
321
322
323

class Task(abc.ABC):
    """A task represents an entire benchmark including its dataset, problems,
    answers, and evaluation methods. See BoolQ for a simple example implementation.

    A `doc` can be any Python object which represents one instance of evaluation.
    This is usually a dictionary, e.g.
        {"question": ..., "answer": ...}
    """

    VERSION: Optional[Union[int, str]] = None

    # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
    # or a path to a custom `datasets` loading script.
    DATASET_PATH: Optional[str] = None

    # The name of a subset within `DATASET_PATH`.
    DATASET_NAME: Optional[str] = None

    OUTPUT_TYPE: Optional[OutputType] = None

    def __init__(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode: Optional[datasets.DownloadMode] = None,
        config: Optional[Mapping] = None,  # Union[dict, TaskConfig]
    ) -> None:
        """
        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.download(data_dir, cache_dir, download_mode)
        self._training_docs: Optional[list] = None
        self._fewshot_docs: Optional[list] = None
        self._instances: Optional[List[Instance]] = None

        self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()

        self._filters = [build_filter_ensemble("none", [["take_first", None]])]
        self.fewshot_rnd: Optional[random.Random] = (
            None  # purposely induce errors in case of improper usage
        )

    def download(
        self,
        data_dir: Optional[str] = None,
        cache_dir: Optional[str] = None,
        download_mode=None,
    ) -> None:
        """Downloads and returns the task dataset.
        Override this method to download the dataset from a custom API.

        :param data_dir: str
            Stores the path to a local folder containing the `Task`'s data files.
            Use this to specify the path to manually downloaded data (usually when
            the dataset is not publicly accessible).
        :param cache_dir: str
            The directory to read/write the `Task` dataset. This follows the
            HuggingFace `datasets` API with the default cache directory located at:
                `~/.cache/huggingface/datasets`
            NOTE: You can change the cache location globally for a given process
            by setting the shell environment variable, `HF_DATASETS_CACHE`,
            to another directory:
                `export HF_DATASETS_CACHE="/path/to/another/directory"`
        :param download_mode: datasets.DownloadMode
            How to treat pre-existing `Task` downloads and data.
            - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
                Reuse download and reuse dataset.
            - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
                Reuse download with fresh dataset.
            - `datasets.DownloadMode.FORCE_REDOWNLOAD`
                Fresh download and fresh dataset.
        """
        self.dataset = datasets.load_dataset(
            path=self.DATASET_PATH,
            name=self.DATASET_NAME,
            data_dir=data_dir,
            cache_dir=cache_dir,
            download_mode=download_mode,
        )

    @property
    def config(self) -> TaskConfig:
        """Returns the TaskConfig associated with this class."""
        return self._config

    @abc.abstractmethod
    def has_training_docs(self):
        """Whether the task has a training set"""
        pass

    @abc.abstractmethod
    def has_validation_docs(self):
        """Whether the task has a validation set"""
        pass

    @abc.abstractmethod
    def has_test_docs(self):
        """Whether the task has a test set"""
        pass

    def training_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def validation_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def test_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        return []

    def fewshot_docs(self) -> Iterable:
        """
        :return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
        """
        if self.has_training_docs():
            return self.training_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            if self.config.get("num_fewshot", 0) > 0:
                eval_logger.warning(
                    f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
                    ", using test_docs as fewshot_docs but this is not recommended."
                )
            return self.test_docs()

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    @property
    def instances(self) -> List[Instance]:
        """After calling `task.build_all_requests()`, tasks
        maintain a list of the dataset instances which will be evaluated.
        """
        return self._instances

    def fewshot_examples(self, k, rnd):
        if self._training_docs is None:
            self._training_docs = list(self.training_docs())

        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        raise NotImplementedError(
            "Override doc_to_decontamination_query with document specific decontamination query."
        )

    @abc.abstractmethod
    def doc_to_text(self, doc):
        pass

    @abc.abstractmethod
    def doc_to_target(self, doc):
        pass

    # not an abstractmethod because not every language-only task has to implement this
    def doc_to_image(self, doc):
        raise NotImplementedError

    def doc_to_audio(self, doc):
        raise NotImplementedError

    def doc_to_prefix(self, doc):
        return ""

    def build_all_requests(
        self,
        *,
        limit: Union[int, None] = None,
        samples: Optional[List[int]] = None,
        rank: int = 0,
        world_size: int = 1,
        cache_requests: bool = False,
        rewrite_requests_cache: bool = False,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        tokenizer_name: str = "",
    ) -> None:
        """Build a set of Instances for a task, and store them in task.instances"""

        # used with caching
        og_limit = limit

        cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
        cache_key += "-chat_template" if apply_chat_template else ""
        cache_key += "-fewshot_as_multiturn" if fewshot_as_multiturn else ""
        cache_key += (
            f"-system_prompt_hash{utils.hash_string(system_instruction)}"
            if system_instruction is not None
            else ""
        )
        cache_key += f"-tokenizer{tokenizer_name}"

        cached_instances = load_from_cache(file_name=cache_key, cache=cache_requests)

        if cache_requests and cached_instances and not rewrite_requests_cache:
            cached_instances = cached_instances[:limit]

            flattened_instances = [
                instance
                for instance_group in cached_instances
                for instance in instance_group
            ]

            self._instances = flattened_instances
            return

        eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")

        instances = []

        # process all documents when caching is specified for simplicity
        if (
            cache_requests
            and (not cached_instances or rewrite_requests_cache)
            and limit is not None
        ):
            limit = None

        doc_id_docs = list(
            self.doc_iterator(
                rank=rank, limit=limit, samples=samples, world_size=world_size
            )
        )

        num_docs = len(doc_id_docs)

        for doc_id, doc in tqdm(
            doc_id_docs,
            total=num_docs,
        ):
            # sample fewshot context #TODO: need to offset doc_id by rank now!
            fewshot_ctx = self.fewshot_context(
                doc,
                num_fewshot=0
                if self.config.num_fewshot is None
                else self.config.num_fewshot,
                system_instruction=system_instruction,
                apply_chat_template=apply_chat_template,
                fewshot_as_multiturn=fewshot_as_multiturn,
                chat_template=chat_template,
                gen_prefix=self.doc_to_prefix(doc),
            )

            # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
            inst = self.construct_requests(
                doc=doc,
                ctx=fewshot_ctx,
                metadata=(self.config["task"], doc_id, self.config.repeats),
                apply_chat_template=apply_chat_template,
                chat_template=chat_template,
            )

            if not isinstance(inst, list):
                inst = [inst]

            instances.append(inst)

        # now flatten, this is to allow slicing to work with pickles

        sliced_instances = instances[:og_limit]

        flattened_instances = [
            instance
            for instance_group in sliced_instances
            for instance in instance_group
        ]

        self._instances = flattened_instances

        if len(self._instances) == 0:
            raise ValueError("task.build_requests() did not find any docs!")

        if cache_requests and (not cached_instances or rewrite_requests_cache):
            save_to_cache(file_name=cache_key, obj=instances)

    @abc.abstractmethod
    def construct_requests(self, doc, ctx, **kwargs):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        :param doc_idx: int
            The index of a document within `self.test_docs()` or `self.validation_docs()`,
            whichever is the main split used.
        :param repeats: int
        TODO: update this docstring
            The number of times each instance in a dataset is inferred on. Defaults to 1,
            can be increased for techniques like majority voting.
        """
        pass

    @abc.abstractmethod
    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluate them, returning
        a dict where keys are the names of submetrics and values are the values
        of the metric for that one document.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        pass

    @deprecated("not used anymore")
    def aggregation(self):
        """
        :returns: {str: [metric_score] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metric scores
        """
        pass

    @deprecated("not used anymore")
    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        pass

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @classmethod
    def count_bytes(cls, doc):
        """Used for byte-level perplexity metrics in rolling loglikelihood"""
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc):
        """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
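
    # Quick sanity examples (illustrative): count_bytes("é") == 2 under UTF-8,
    # and count_words("2 plus 2") == 3 with the default whitespace split.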

    @utils.positional_deprecated
    def fewshot_context(self, doc, num_fewshot, rnd=None, description=None, **kwargs):
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param rnd: random.Random
            The pseudo-random number generator used to randomly sample examples.
            WARNING: This is currently a required arg although it's optionalized with a default `None`.
        :param description: str
            The task's description that will be prepended to the fewshot examples.
        :returns: str
            The fewshot context.
        """
        if rnd is None:
            if self.fewshot_rnd is not None:
                rnd = self.fewshot_rnd
            else:
                raise ValueError(
                    "A `random.Random` generator argument must be provided to `rnd`"
                )

        description = description if description else ""

        if num_fewshot == 0:
            labeled_examples = ""
        else:
            # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if self._fewshot_docs is None:
                    self._fewshot_docs = list(
                        self.validation_docs()
                        if self.has_validation_docs()
                        else self.test_docs()
                    )

                fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)

                # get rid of the doc that's the one we're evaluating, if it's in the fewshot
                fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]

            labeled_examples = (
                "\n\n".join(
                    [
                        self.doc_to_text(doc) + self.doc_to_target(doc)
                        for doc in fewshotex
                    ]
                )
                + "\n\n"
            )

        example = self.doc_to_text(doc)
        return description + labeled_examples + example
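
    # Illustrative only: with one sampled fewshot example and no description,
    # the returned context is a plain concatenation (strings are hypothetical):
    #
    #     "Q: 2+2?\nA: 4\n\nQ: 3+3?\nA:"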

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances"""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def dump_config(self) -> dict:
        """Returns the config as a dictionary."""
        # TODO: this should only return the overrides applied to a non-YAML task's configuration.
        # (num_fewshot)
        return self.config.to_dict()

    def set_config(self, key: str, value: Any, update: bool = False) -> None:
        """Set or update the configuration for a given key."""
        if key is None:
            raise ValueError("Key must be provided.")

        if update:
            current_value = getattr(self._config, key, {})
            if not isinstance(current_value, dict):
                raise TypeError(
                    f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
                )
            current_value.update(value)
        else:
            setattr(self._config, key, value)

    def override_metric(self, metric_name: str) -> None:
        """
        Override the default metrics used for evaluation with custom metrics.

        Parameters:
        - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
        """
        # if not isinstance(self, ConfigurableTask):
        #     self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
        #     self.aggregation = lambda: {
        #         metric_name: get_metric_aggregation(metric_name)
        #     }
        setattr(self._config, "metric_list", [MetricConfig(name=metric_name)])
        setattr(self._config, "process_results", lambda *args: {"bypass": 0})

    def set_fewshot_seed(self, seed: Optional[int] = None) -> None:
        self.fewshot_rnd = random.Random(seed)
        if hasattr(self, "sampler"):
            self.sampler.rnd = self.fewshot_rnd

    @property
    def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
        if self.has_test_docs():
            return self.test_docs()
        elif self.has_validation_docs():
            return self.validation_docs()
        else:
            raise ValueError(
                f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
            )

    def doc_iterator(
        self,
        *,
        rank: int = 0,
        limit: Union[int, None] = None,
        world_size: int = 1,
        samples: Optional[List[int]] = None,
    ) -> Iterator[Tuple[int, Any]]:
        if samples:
            n = len(self.eval_docs)
            assert all([e < n for e in samples]), (
                f"Elements of --samples should be in the interval [0,k-1] where k is the number of total examples. In this case, k={n}."
            )
            eval_logger.info(
                f"{self.config.task}: Evaluating on {len(samples)} examples"
            )
            doc_iterator = utils.create_iterator(
                enumerate(x for i, x in enumerate(self.eval_docs) if i in samples),
                rank=int(rank),
                limit=None,  # limit does not matter here since we are selecting samples directly
                world_size=int(world_size),
            )
        else:
            limit = int(limit) if limit else None
            doc_iterator = utils.create_iterator(
                enumerate(self.eval_docs),
                rank=int(rank),
                limit=limit,
                world_size=int(world_size),
            )
        return doc_iterator
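
    # Illustrative only: given the strided sharding in utils.create_iterator,
    # ranks see interleaved shards of the evaluation docs, e.g. with 5 docs
    # and no limit:
    #
    #     list(task.doc_iterator(rank=0, world_size=2))  # docs 0, 2, 4
    #     list(task.doc_iterator(rank=1, world_size=2))  # docs 1, 3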


class ConfigurableTask(Task):
    VERSION = "Yaml"
    OUTPUT_TYPE = None
    CONFIG = None

    def __init__(
        self,
        data_dir=None,
        cache_dir=None,
        download_mode=None,
        config: Optional[dict] = None,
    ) -> None:
        # Get pre-configured attributes
        self._config = self.CONFIG

        # Use new configurations if there was no preconfiguration
        if self.config is None:
            self._config = TaskConfig(**config)
        # Overwrite configs
        else:
            if config is not None:
                self._config.__dict__.update(config)

        if self.config is None:
            raise ValueError(
                "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
            )

        if isinstance(self.config.metadata, dict):
            if "version" in self.config.metadata:
                self.VERSION = self.config.metadata["version"]

        if self.config.output_type is not None:
            if self.config.output_type not in ALL_OUTPUT_TYPES:
                raise ValueError(
                    f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
                )
            self.OUTPUT_TYPE = self.config.output_type

        if self.config.doc_to_image is not None:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.doc_to_audio:
            # mark the task as requiring multimodality.
            self.MULTIMODAL = True

        if self.config.unsafe_code is not False:
            self.UNSAFE_CODE = True

        if self.config.dataset_path is not None:
            self.DATASET_PATH = self.config.dataset_path

        if self.config.dataset_name is not None:
            self.DATASET_NAME = self.config.dataset_name

        self.metric_list: list[MetricConfig] = self._config.get_metrics()

        self.download(self.config.dataset_kwargs)
        self._training_docs = None
        self._fewshot_docs = None

        if self.config.filter_list is not None:
            self._filters = []
            for filter_config in self.config.filter_list:
                self._filters.append(
                    build_filter_ensemble(
                        filter_config["name"],
                        [
                            [
                                function["function"],
                                {
                                    key: function[key]
                                    for key in function
                                    if key != "function"
                                },
                            ]
                            for function in filter_config["filter"]
                        ],
                    )
                )
        else:
            # TODO: handle repeats in a more general way rather than just discarding
            eval_logger.debug(
                "No custom filters defined. Using default 'take_first' filter for handling repeats."
            )
            self._filters = [build_filter_ensemble("none", [["take_first", None]])]

        if self.config.use_prompt is not None:
            eval_logger.info(f"loading prompt {self.config.use_prompt}")
            self.prompt = get_prompt(
                self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
            )
        else:
            self.prompt = None

        if self.fewshot_docs() is not None:
            self.fewshot_rnd = (
                random.Random()
            )  # setting with no seed, to be overridden at a later time
            config_sampler: Union[str, Callable] = (
                self.config.fewshot_config.get("sampler", "default")
                if self.config.fewshot_config
                else "default"
            )
            if isinstance(config_sampler, str):
                self.sampler = samplers.get_sampler(config_sampler)(
                    list(self.fewshot_docs()), self, rnd=self.fewshot_rnd
                )
            elif callable(config_sampler) and issubclass(
                config_sampler, samplers.ContextSampler
            ):
                self.sampler = config_sampler(
                    docs=list(self.fewshot_docs()), task=self, rnd=self.fewshot_rnd
                )
            else:
                raise TypeError(
                    f"fewshot_config.sampler should be a string or callable of ContextSampler type, "
                    f"not {type(config_sampler)}"
                )

        self.task_docs = self.eval_docs

        # Test One Doc
        self.features = list(self.task_docs.features.keys())
        self.multiple_input = 0
        self.multiple_target = 0
        test_doc = self.task_docs[0]
        test_text = self.doc_to_text(test_doc)
        test_target = self.doc_to_target(test_doc)

        if self.config.doc_to_choice is not None:
            test_choice = self.doc_to_choice(test_doc)
            if not isinstance(test_choice, list):
                eval_logger.error("doc_to_choice must return list")
            else:
                num_choice = len(test_choice)

            if isinstance(test_text, int):
                eval_logger.debug(
                    "doc_to_text returned an int. Assuming multiple inputs."
                )
                self.multiple_input = num_choice
        else:
            test_choice = None

        if isinstance(test_target, list):
            eval_logger.debug(
                "doc_to_target returned a list. Assuming multiple targets."
            )
            self.multiple_target = len(test_target)
        else:
            if (isinstance(test_target, int)) and (test_choice is not None):
                test_target = test_choice[test_target]
            else:
                test_target = str(test_target)

        if test_choice is not None:
            check_choices = test_choice
        else:
            check_choices = [test_target]
        if self.config.doc_to_choice is not None:
            for choice in check_choices:
                choice_has_whitespace = True if choice[0].isspace() else False
                delimiter_has_whitespace = (
                    True
                    if self.config.target_delimiter.rstrip()
                    != self.config.target_delimiter
                    else False
                )

                if delimiter_has_whitespace and choice_has_whitespace:
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
                    )
                elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
                    eval_logger.debug(
                        f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
                    )

    def download(
        self, dataset_kwargs: Optional[Dict[str, Any]] = None, **kwargs
    ) -> None:
        if isinstance(self.config.custom_dataset, Callable):
            eval_logger.warning(
                f"{self.config.task}: Custom kwargs can be passed to `--metadata` in console (as json string) or to the TaskManager."
                + "\nFor example --metadata='{\"max_seq_lengths\":[4096, 8192]}'. For details see task Readme."
            )
            self.dataset = self.config.custom_dataset(
                **(self.config.metadata or {}), **(self.config.dataset_kwargs or {})
            )
        else:
            self.dataset = datasets.load_dataset(
                path=self.DATASET_PATH,
                name=self.DATASET_NAME,
                **dataset_kwargs if dataset_kwargs is not None else {},
            )
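
    # Illustrative only: `dataset_kwargs` is forwarded verbatim to
    # `datasets.load_dataset`, so a task config can pin loader options
    # (the values here are hypothetical):
    #
    #     self.download({"revision": "main", "trust_remote_code": True})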

    def has_training_docs(self) -> bool:
        if self.config.training_split is not None:
            return True
        else:
            return False

    def has_validation_docs(self) -> bool:
        if self.config.validation_split is not None:
            return True
        else:
            return False

    def has_test_docs(self) -> bool:
        if self.config.test_split is not None:
            return True
        else:
            return False

    def training_docs(self) -> datasets.Dataset:
        if self.has_training_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.training_split]
                )
            return self.dataset[self.config.training_split]

    def validation_docs(self) -> datasets.Dataset:
        if self.has_validation_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(
                    self.dataset[self.config.validation_split]
                )
            return self.dataset[self.config.validation_split]

    def test_docs(self) -> datasets.Dataset:
        if self.has_test_docs():
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.test_split])
            return self.dataset[self.config.test_split]

    def fewshot_docs(self):
        if self.config.fewshot_split is not None:
            if self.config.process_docs is not None:
                return self.config.process_docs(self.dataset[self.config.fewshot_split])
            return self.dataset[self.config.fewshot_split]
        elif (
            self.config.fewshot_config is not None
            and self.config.fewshot_config.get("samples", None) is not None
        ):
            if isinstance(self.config.fewshot_config["samples"], list):
                return self.config.fewshot_config["samples"]
            elif callable(self.config.fewshot_config["samples"]):
                return self.config.fewshot_config["samples"]()
            else:
                raise Exception(
                    "`fewshot_config['samples']` was incorrectly defined in the configuration. It should be either a list of samples as a dict, or function returning this list."
                )
        else:
            if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
                eval_logger.warning(
                    f"[Task: {self.config.task}] "
                    "num_fewshot > 0 but fewshot_split is None. "
                    "Using the preconfigured rule."
                )
            return super().fewshot_docs()
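
    # Illustrative only: `fewshot_config["samples"]` may be a literal list of
    # docs or a zero-argument callable returning one (field names below are
    # hypothetical):
    #
    #     fewshot_config = {
    #         "samples": [
    #             {"question": "2+2?", "answer": "4"},
    #             {"question": "3+3?", "answer": "6"},
    #         ]
    #     }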

    @staticmethod
    def append_target_question(
        labeled_examples: List[Dict[str, str]],
        question: str,
        fewshot_as_multiturn: bool = False,
        gen_prefix: Optional[str] = None,
    ) -> None:
        """Adds a target question to the labeled examples list.
        If fewshot_as_multiturn is True, or labeled_examples is empty, or the last entry is a system turn, appends the question as a new user entry.
        Otherwise, it is appended to the last user entry, ensuring that the conversation alternates between the user and the assistant.
        """
        if not fewshot_as_multiturn:
            # if no messages or last message is system, append as new user entry
            if len(labeled_examples) == 0 or labeled_examples[-1]["role"] == "system":
                labeled_examples.append({"role": "user", "content": question})
            # if last message is user, append to it to avoid two user messages in a row
            else:
                labeled_examples[-1]["content"] += question
        else:
            # if fewshot_as_multiturn is True, append as next user entry (last is always assistant)
            labeled_examples.append({"role": "user", "content": question})
        if gen_prefix:
            labeled_examples.append({"role": "assistant", "content": gen_prefix})
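
    # Illustrative only: with fewshot_as_multiturn=False the target question
    # is merged into the trailing user turn (a hypothetical transcript):
    #
    #     msgs = [{"role": "user", "content": "Q1 A1\n\n"}]
    #     ConfigurableTask.append_target_question(msgs, "Q2")
    #     # msgs -> [{"role": "user", "content": "Q1 A1\n\nQ2"}]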

    @utils.positional_deprecated
    def fewshot_context(
        self,
        doc: dict,
        num_fewshot: int,
        system_instruction: Optional[str] = None,
        apply_chat_template: bool = False,
        fewshot_as_multiturn: bool = False,
        chat_template: Optional[Callable] = None,
        gen_prefix: Optional[str] = None,
    ) -> Union[str, List[str]]:
        """Returns a fewshot context string that is made up of a prepended description
        (if provided), the `num_fewshot` number of examples, and an appended prompt example.

        :param doc: str
            The document as returned from training_docs, validation_docs, or test_docs.
        :param num_fewshot: int
            The number of fewshot examples to provide in the returned context string.
        :param system_instruction: str
            System instruction to be applied to the prompt.
        :param apply_chat_template: bool
            Whether to apply the chat template to the fewshot context.
        :param fewshot_as_multiturn: bool
            Whether to provide the fewshot examples as a multiturn conversation or a single user turn.
        :param chat_template:
            callable (from lm.apply_chat_template) that takes in a list[Dict] chat transcript and renders it into a string.
        :param gen_prefix:
            String to append after the <|assistant|> token.
        :returns: str
            The fewshot context.
        """
        if apply_chat_template:
            labeled_examples = []
        else:
            labeled_examples = ""

        # get task description
        if description := self.config.description:
            description = utils.apply_template(self.config.description, doc)

        # create system prompt based on the provided system instruction and description
        if system_instruction is not None and description:
            system_prompt = (
                f"{system_instruction}{self.sampler.fewshot_delimiter}{description}"
            )
        elif system_instruction is not None:
            system_prompt = system_instruction
        elif description:
            system_prompt = description
        else:
            system_prompt = ""

        # add system prompt if specified
        if system_prompt:
            if apply_chat_template:
                labeled_examples.append({"role": "system", "content": system_prompt})
            else:
                labeled_examples = system_prompt
        # if few-shot - append examples after the system prompt
        if num_fewshot > 0:
            if apply_chat_template:
                labeled_examples.extend(
                    self.sampler.get_chat_context(
                        doc,
                        num_fewshot,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                )
            else:
                labeled_examples += self.sampler.get_context(
Baber Abbasi's avatar
Baber Abbasi committed
1208
                    doc, num_fewshot, gen_prefix=gen_prefix
Baber Abbasi's avatar
Baber Abbasi committed
1209
                )

        example = self.doc_to_text(doc)
        if apply_chat_template:
            if self.multiple_input:
                # TODO: append prefill?
                if not labeled_examples:
                    return ""
                return chat_template(labeled_examples)
            if isinstance(example, str):
                self.append_target_question(
                    labeled_examples,
                    example,
                    fewshot_as_multiturn,
                    gen_prefix=gen_prefix,
                )
            # for loglikelihood create a list of questions with appended choices
            elif isinstance(example, list):
                labeled_examples_list = []
                # copy chat history for each example and append the answer
                for ex in example:
                    chat = deepcopy(labeled_examples)
                    self.append_target_question(
                        chat,
                        ex,
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                    # TODO: append prefill?
                    labeled_examples_list.append(
                        chat_template(
                            chat,
                            add_generation_prompt=False if gen_prefix else True,
                        )
                    )
                return labeled_examples_list
            # if example is an integer, append the choice or convert to string
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    self.append_target_question(
                        labeled_examples,
                        choices[example],
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                else:
                    self.append_target_question(
                        labeled_examples,
                        str(example),
                        fewshot_as_multiturn,
                        gen_prefix=gen_prefix,
                    )
                # return lm.apply_chat_template(labeled_examples)
            return chat_template(
                labeled_examples,
                add_generation_prompt=False if gen_prefix else True,
            )
        else:
            prefix = (
                self.config.target_delimiter + gen_prefix
                if gen_prefix is not None
                else ""
            )
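            # Illustrative example (hypothetical values): with target_delimiter " " and
            # gen_prefix "Answer:", prefix becomes " Answer:", so a string example
            # "Q: 2+2=?" is returned as labeled_examples + "Q: 2+2=?" + " Answer:".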
            if self.multiple_input:
                return labeled_examples
            if isinstance(example, str):
                return labeled_examples + example + prefix
            elif isinstance(example, list):
                return [labeled_examples + ex + prefix for ex in example]
            elif isinstance(example, int):
                if self.config.doc_to_choice is not None:
                    choices = self.doc_to_choice(doc)
                    return labeled_examples + choices[example] + prefix
                else:
                    return labeled_examples + str(example) + prefix

    def apply_filters(self) -> Optional[List[Instance]]:
        """Iterates over FilterEnsembles and applies them to instances."""
        if hasattr(self, "_filters"):
            for f in self._filters:
                f.apply(self._instances)
        else:
            eval_logger.warning("No filter defined, passing through instances")
            return self._instances

    def should_decontaminate(self):
        return self.config.should_decontaminate

    def doc_to_decontamination_query(self, doc: dict):
        if self.config.should_decontaminate:
            if self.config.doc_to_decontamination_query is None:
                return self.doc_to_text(doc)
            else:
                doc_to_decontamination_query = self.config.doc_to_decontamination_query
                if doc_to_decontamination_query in self.features:
                    return doc[doc_to_decontamination_query]
                elif callable(doc_to_decontamination_query):
                    return doc_to_decontamination_query(doc)
                else:
                    return ast.literal_eval(
                        utils.apply_template(
                            self.config.doc_to_decontamination_query, doc
                        )
                    )

    def _process_doc(self, doc: dict) -> dict:
        """
        Override this to process (detokenize, strip, replace, etc.) individual
        documents. This can be used in a map over documents of a data split.
        E.g. `map(self._process_doc, self.dataset["validation"])`

        :return: dict
            The processed version of the specified `doc`.
        """
        return doc

    def doc_to_text(self, doc, doc_to_text=None):
        if self.prompt is not None:
            doc_to_text = self.prompt
        elif doc_to_text is not None:
            doc_to_text = doc_to_text
        else:
            doc_to_text = self.config.doc_to_text

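        # Illustrative example (hypothetical config/doc): doc_to_text "Q: {{question}}\nA:"
        # with doc {"question": "What is 2+2?"} renders to "Q: What is 2+2?\nA:" via
        # utils.apply_template; a bare feature name like "question" returns doc["question"] directly.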
        if isinstance(doc_to_text, int):
            return doc_to_text
        elif isinstance(doc_to_text, str):
            if doc_to_text in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_text]]
                # else:
                return doc[doc_to_text]
            else:
                text_string = utils.apply_template(doc_to_text, doc)
                if text_string.isdigit() and self.config.doc_to_choice is not None:
                    return ast.literal_eval(text_string)
                else:
                    return text_string
        elif callable(doc_to_text):
            return doc_to_text(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_text, "apply"):
            applied_prompt = doc_to_text.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[0]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"doc_to_text must be an int, str, callable, or Promptsource template, got {type(doc_to_text)}"
            )

    def doc_to_target(self, doc: Mapping, doc_to_target=None) -> Union[int, str, list]:
        if self.prompt is not None:
            doc_to_target = self.prompt
        elif doc_to_target is not None:
            doc_to_target = doc_to_target
        else:
            doc_to_target = self.config.doc_to_target

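        # Illustrative examples (hypothetical values): a rendered "2" with doc_to_choice set
        # is parsed to the int 2 (a choice index); a rendered "['yes', 'no']" is parsed back
        # into a Python list via ast.literal_eval; anything else is returned as a string.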
        if isinstance(doc_to_target, int):
            return doc_to_target
        elif isinstance(doc_to_target, str):
            if doc_to_target in self.features:
                # if self.config.doc_to_choice is not None:
                #     return self.doc_to_choice(doc)[doc[doc_to_target]]
                # else:
                return doc[doc_to_target]
            else:
                target_string = utils.apply_template(doc_to_target, doc)
                if target_string.isdigit() and self.config.doc_to_choice is not None:
                    return ast.literal_eval(target_string)
                elif (
                    len(target_string) >= 2
                    and (target_string[0] == "[")
                    and (target_string[-1] == "]")
                ):
                    try:
                        return ast.literal_eval(target_string)
                    except (SyntaxError, ValueError):
                        return target_string
                else:
                    return target_string
        elif isinstance(doc_to_target, list):
            return doc_to_target
        elif callable(doc_to_target):
            return doc_to_target(doc)
        # Used when applying a Promptsource template
        elif hasattr(doc_to_target, "apply"):
            applied_prompt = doc_to_target.apply(doc)
            if len(applied_prompt) == 2:
                return applied_prompt[1]
            else:
                eval_logger.warning("Applied prompt returns empty string")
                return self.config.fewshot_delimiter
        else:
            raise TypeError(
                f"doc_to_target must be an int, str, list, callable, or Promptsource template, got {type(doc_to_target)}"
            )

    def doc_to_choice(self, doc: Any, doc_to_choice=None) -> List[str]:
        if self.prompt is not None:
            doc_to_choice = self.prompt
        elif doc_to_choice is not None:
            doc_to_choice = doc_to_choice
        elif self.config.doc_to_choice is None:
            eval_logger.error("doc_to_choice was called but not set in config")
        else:
            doc_to_choice = self.config.doc_to_choice

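        # Illustrative examples (hypothetical values): doc_to_choice ["yes", "no"] is returned
        # as-is, {"A": "yes", "B": "no"} yields ["yes", "no"] (its values), and a feature name
        # or Jinja template resolves against the doc.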
        if isinstance(doc_to_choice, str):
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif isinstance(doc_to_choice, list):
            return doc_to_choice
        elif isinstance(doc_to_choice, dict):
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError(
                f"doc_to_choice must be a str, list, dict, callable, or Promptsource template, got {type(doc_to_choice)}"
            )

    def doc_to_image(self, doc: Any, doc_to_image=None) -> Union[int, str, list]:
        if doc_to_image is not None:
            doc_to_image = doc_to_image
        elif self.config.doc_to_image is not None:
            doc_to_image = self.config.doc_to_image
        else:
            return None

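        # Illustrative example (hypothetical feature names): doc_to_image ["image_1", "image_2"]
        # resolves each entry recursively and drops any that come back as None.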
        if isinstance(doc_to_image, list):
            image_feature = [
                self.doc_to_image(doc, feature) for feature in doc_to_image
            ]
            return [feature for feature in image_feature if feature is not None]
        elif isinstance(doc_to_image, str):
            if doc_to_image in self.features:
                return doc[doc_to_image]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_image, doc))
        elif callable(doc_to_image):
            return doc_to_image(doc)
        else:
            return None

    def doc_to_audio(self, doc: Any, doc_to_audio=None) -> Union[int, str, list]:
        if doc_to_audio is not None:
            doc_to_audio = doc_to_audio
        elif self.config.doc_to_audio is not None:
            doc_to_audio = self.config.doc_to_audio
        else:
            return None

        if isinstance(doc_to_audio, list):
            audio_feature = [
                self.doc_to_audio(doc, feature) for feature in doc_to_audio
            ]
            return [feature for feature in audio_feature if feature is not None]
        elif isinstance(doc_to_audio, str):
            if doc_to_audio in self.features:
                return doc[doc_to_audio]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_audio, doc))
        elif callable(doc_to_audio):
            return doc_to_audio(doc)
        else:
            return None

    def doc_to_prefix(self, doc):
        if (gen_prefix := self.config.gen_prefix) is not None:
            if gen_prefix in self.features:
                return doc[gen_prefix]
            else:
                return utils.apply_template(gen_prefix, doc)
        return None

    def construct_requests(
        self, doc: dict, ctx: str, **kwargs
    ) -> Union[List[Instance], Instance]:
        apply_chat_template = kwargs.pop("apply_chat_template", False)
        chat_template: Callable | None = kwargs.pop("chat_template", None)

        aux_arguments = None

        if self.OUTPUT_TYPE == "loglikelihood":
            arguments = (ctx, self.doc_to_target(doc))
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            arguments = (self.doc_to_target(doc),)
        elif self.OUTPUT_TYPE == "multiple_choice":
            choices = self.doc_to_choice(doc)
            target_delimiter = self.config.target_delimiter
            if apply_chat_template:
                target_delimiter = ""
            if self.multiple_input:
                # If there are multiple inputs, choices are placed in the ctx
                # apply chat_template to choices if apply_chat_template
                cont = self.doc_to_target(doc)

                arguments = [
                    (
                        ctx
                        + (
                            chat_template([{"role": "user", "content": choice}])
                            if apply_chat_template
                            else choice
                        ),
                        f"{target_delimiter}{cont}",
                    )
                    for choice in choices
                ]
            else:
                # Otherwise they are placed in the continuation
                arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
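                # Illustrative example (hypothetical values): with choices ["yes", "no"] and
                # target_delimiter " ", this yields [(ctx, " yes"), (ctx, " no")].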

            # TODO: we should raise a warning telling users this will at most ~2x runtime.
            if "acc_mutual_info" in [m.metric_names for m in self.metric_list]:
                # if we are calculating multiple choice accuracy
                # using mutual information instead of raw loglikelihood as metric, need unconditional lls.

                # here mutual info refers to calculating
                # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
                # in other words normalizing by subtracting the unconditional logprob of each choice.

                # TODO: should these be strided? will have to modify the processing in process_results if so
                aux_arguments = [
                    ("", f"{target_delimiter}{choice}") for choice in choices
                ]

                arguments.extend(aux_arguments)

        elif self.OUTPUT_TYPE == "generate_until":
            arguments = (ctx, deepcopy(self.config.generation_kwargs))

        multimodal_arg = {}
        if (
            self.config.doc_to_image
        ):  # TODO: ensure that non-multimodal tasks aren't getting visual args
            multimodal_arg = {
                **multimodal_arg,
                **{"visual": self.doc_to_image(doc)},
            }

        if (
            self.config.doc_to_audio
        ):  # TODO: ensure that non-multimodal tasks aren't getting audio args
            multimodal_arg = {
                **multimodal_arg,
                **{"audio": self.doc_to_audio(doc)},
            }

        if bool(multimodal_arg):
            if isinstance(arguments, list):
                arguments = [arg + (multimodal_arg,) for arg in arguments]
            else:
                arguments = arguments + (multimodal_arg,)

        if self.OUTPUT_TYPE == "multiple_choice":
            request_list = [
                Instance(
                    request_type="loglikelihood",
                    doc=doc,
                    arguments=arg,
                    idx=i,
                    **kwargs,
                )
                for i, arg in enumerate(arguments)
            ]

            return request_list

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=arguments,
            idx=0,
            **kwargs,
        )

    def process_results(self, doc, results):
        if callable(self.config.process_results):
            return self.config.process_results(doc, results)

        result_dict = {}
        use_metric = [m.metric_names for m in self.metric_list]
        if self.OUTPUT_TYPE == "loglikelihood":
            results = results[0]
            ll, is_greedy = results
            return {
                **({"perplexity": ll} if "perplexity" in use_metric else {}),
                **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
            }
        elif self.OUTPUT_TYPE == "loglikelihood_rolling":
            (loglikelihood,) = results
            _words = self.count_words(self.doc_to_target(doc))
            _bytes = self.count_bytes(self.doc_to_target(doc))
            return {
                **(
                    {"word_perplexity": (loglikelihood, _words)}
                    if "word_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"byte_perplexity": (loglikelihood, _bytes)}
                    if "byte_perplexity" in use_metric
                    else {}
                ),
                **(
                    {"bits_per_byte": (loglikelihood, _bytes)}
                    if "bits_per_byte" in use_metric
                    else {}
                ),
            }
        elif self.OUTPUT_TYPE == "multiple_choice":
            lls, is_greedy = zip(*results)

            # retrieve choices in List[str] form, to compute choice lengths, etc.
            choices = self.doc_to_choice(doc)
            completion_len = np.array([float(len(i)) for i in choices])

            if 2 * len(choices) == len(lls) and "acc_mutual_info" in use_metric:
                # then we are doing mutual info.
                # this stores the "dryrun" / unconditional answer loglikelihoods
                # as we extend the args list with unconditional ("", continuation) pairs
                lls_unconditional = lls[len(choices) :]
                if len(lls_unconditional) != len(choices):
                    raise ValueError
                # and this stores our "regular" conditional loglikelihoods
                lls = lls[: len(choices)]

            pred = np.argmax(lls)
            pred_norm = np.argmax(lls / completion_len)
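            # Illustrative example (hypothetical values): lls (-2.0, -3.5) over choices
            # ["cat", "giraffe"] gives pred = 0, while length normalization
            # (-2.0 / 3 vs. -3.5 / 7) flips the argmax, giving pred_norm = 1.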

            if self.multiple_input:
                gold = self.doc_to_text(doc)
            else:
                gold = self.doc_to_target(doc)

            gold_index_error = False
            if isinstance(gold, list):
                gold = [i if i < len(choices) else -100 for i in gold]
                if -100 in gold:
                    gold_index_error = True
            else:
                if isinstance(gold, int):
                    gold = gold if gold < len(choices) else -100
                elif isinstance(gold, str):
                    gold = choices.index(gold) if gold in choices else -100

                if gold == -100:
                    gold_index_error = True

            if gold_index_error:
                eval_logger.warning(
                    "Label index was not within the range of available choices. "
                    f"Sample:\n\n{doc}\n\n"
                )

            if self.multiple_target:
                acc = 1.0 if pred in gold else 0.0
                acc_norm = 1.0 if pred_norm in gold else 0.0
                exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
            else:
                acc = 1.0 if pred == gold else 0.0
                acc_norm = 1.0 if pred_norm == gold else 0.0
                # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
                exact_match = int(is_greedy[gold]) if gold != -100 else 0

            prob_norm = utils.softmax(lls)
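            # softmax converts the raw loglikelihoods into a normalized probability vector;
            # the brier_score entry below (when that metric is enabled) pairs it with the gold label.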

            # TODO use keyword arguments to the metric?
            # gold, pred, norm stuff, the original lls,
            result_dict = {
                **({"acc": acc} if "acc" in use_metric else {}),
                **({"f1": (gold, pred)} if "f1" in use_metric else {}),
                **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
                **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
                **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
                **(
                    {"brier_score": (gold, prob_norm)}
                    if "brier_score" in use_metric
                    else {}
                ),
            }

            if "acc_mutual_info" in use_metric:
                lls_mutual_info = [
                    ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
                ]
                acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                result_dict["acc_mutual_info"] = acc_mutual_info

        elif self.OUTPUT_TYPE == "generate_until":
            gold = self.doc_to_target(doc)
            result = results[0]
            if self.config.doc_to_choice is not None:
                # If you set doc_to_choice,
                # it assumes that doc_to_target returns a number.
                choices = self.doc_to_choice(doc)
                gold = choices[gold]
            # we expect multiple_targets to be a list.
            elif self.multiple_target:
                gold = list(gold)
            # TODO: handle this better
            elif type(gold) is not type(result) and not (
                "bypass" in use_metric or isinstance(result, list)
            ):
                # cast gold to the same type as result
                gold = type(result)(gold)

            for metric in self.metric_list:
                if self.multiple_target:
                    # in the case where we have multiple targets,
                    # return true if any are true
                    # TODO: this may break for multiple_target, non zero-or-1 metrics
                    scores = []
                    if not isinstance(gold, list):
                        # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                        # print(gold)
                        gold = [gold]
                    if metric.name == "exact_match":
                        result = [result for _ in range(len(gold))]
                        scores = metric.fn(
                            references=gold,
                            predictions=result,
                            **metric.kwargs,
                        )[metric.name]
                        result_score = 1.0 if scores > 0.0 else 0.0
                    else:
                        for gold_option in gold:
                            try:
                                result_score = metric.fn(
                                    references=[gold_option],
                                    predictions=[result],
                                    **metric.kwargs,
                                )
                            except TypeError:  # TODO: this is hacky and I don't want to do it
                                result_score = metric.fn([gold_option, result])
                            if isinstance(result_score, dict):
                                # TODO: this handles the case where HF evaluate returns a dict.
                                result_score = result_score[metric.name]
                            scores.append(result_score)
                        if any(scores):
                            result_score = 1.0
                        else:
                            result_score = 0.0
                else:
                    try:
                        result_score = metric.fn(
                            references=[gold],
                            predictions=[result],
                            **metric.kwargs,
                        )
                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                        result_score = metric.fn([gold, result])
                if isinstance(result_score, dict):
                    # TODO: this handles the case where HF evaluate returns a dict.
                    # This allows for multiple metrics to be returned from the same function
                    for k, v in result_score.items():
                        result_dict[k] = v
                else:
                    result_dict[metric.name] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )

        return result_dict

    def aggregation(self) -> dict:
        # MetricConfig.aggregation falls back to the registry default when no aggregation_fn is set.
        return {k.name: k.aggregation for k in self.metric_list}

    def higher_is_better(self) -> dict:
        return {k.name: k.higher_is_better for k in self.metric_list}

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Any:
        return getattr(self.config, "task", None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )


class MultipleChoiceTask(Task):
    OUTPUT_TYPE = "loglikelihood"

    def doc_to_target(self, doc: dict) -> str:
        return " " + doc["choices"][doc["gold"]]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # TODO: add mutual info here?
        return [
            Instance(
                request_type="loglikelihood",
                doc=doc,
                arguments=(ctx, " {}".format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc["choices"])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [
            res[0] for res in results
        ]  # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
        gold = doc["gold"]

        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc["choices"]])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0

        return {
            "acc": acc,
            "acc_norm": acc_norm,
        }

    def higher_is_better(self) -> dict:
        return {
            "acc": True,
            "acc_norm": True,
        }

    def aggregation(self) -> dict:
        return {
            "acc": mean,
            "acc_norm": mean,
        }


class PerplexityTask(Task):
    OUTPUT_TYPE = "loglikelihood_rolling"

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
        if num_fewshot != 0:
            raise ValueError(
                "The number of fewshot examples must be 0 for perplexity tasks."
            )

        return ""

    def higher_is_better(self) -> dict:
        return {
            "word_perplexity": False,
            "byte_perplexity": False,
            "bits_per_byte": False,
        }

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ""

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError(
                "PerplexityTask expects an empty context (no fewshot examples are used)."
            )

        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        return {
            "word_perplexity": (loglikelihood, words),
            "byte_perplexity": (loglikelihood, bytes_),
            "bits_per_byte": (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            "word_perplexity": weighted_perplexity,
            "byte_perplexity": weighted_perplexity,
            "bits_per_byte": bits_per_byte,
        }

    @classmethod
    def count_bytes(cls, doc) -> int:
        return len(doc.encode("utf-8"))

    @classmethod
    def count_words(cls, doc) -> int:
        """Downstream tasks with custom word boundaries should override this!"""
        return len(re.split(r"\s+", doc))
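        # Illustrative example: count_words("hello   world\nfoo") == 3, since any run of
        # whitespace counts as a single boundary here.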