import numpy as np
from lm_eval.base import MultipleChoiceTask
from ..metrics import mean
from .common import HFTask


class ARCEasy(HFTask, MultipleChoiceTask):
    """ARC-Easy multiple-choice QA task, backed by the HF `ai2_arc` dataset."""

    DATASET_PATH = "ai2_arc"
    DATASET_NAME = "ARC-Easy"

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def _convert_standard(self, doc):
        """Convert a raw dataset record into the standard multiple-choice form.

        Returns a dict with keys `id`, `query` (prompt text), `choices`
        (list of answer strings), and `gold` (index of the correct choice).
        """
        # NOTE: Some `doc["answerKey"]`s are in numeric string format being one
        # of {'1', '2', '3', '4', '5'}. We map them back to letters.
        num_to_letter = {"1": "A", "2": "B", "3": "C", "4": "D", "5": "E"}
        # Normalize into a local variable instead of writing back into `doc`,
        # so the caller-supplied record (a live dataset row) is not mutated.
        answer_key = num_to_letter.get(doc["answerKey"], doc["answerKey"])
        out_doc = {
            "id": doc["id"],
            "query": "Question: " + doc["question"] + "\nAnswer:",
            "choices": doc["choices"]["text"],
            "gold": ["A", "B", "C", "D", "E"].index(answer_key),
        }
        return out_doc

    def _load_docs(self, docs):
        """Lazily convert an iterable of raw records to the standard form."""
        for record in docs:
            yield self._convert_standard(record)

    def training_docs(self):
        return self._load_docs(super().training_docs())

    def validation_docs(self):
        return self._load_docs(super().validation_docs())

    def test_docs(self):
        return self._load_docs(super().test_docs())

    def fewshot_description(self):
        # TODO: figure out description
        return ""

    def doc_to_text(self, doc):
        return doc["query"]


class ARCChallenge(ARCEasy):
    """ARC-Challenge split; identical pipeline, harder question subset."""

    DATASET_PATH = "ai2_arc"
    DATASET_NAME = "ARC-Challenge"