import json
import random
from lm_eval.base import Task
from ..utils import sh
import csv


class StoryCloze(Task):
    """Story Cloze Test: pick the correct fifth sentence for a four-sentence story.

    Documents are rows of the official Story Cloze CSV release: column 0 is the
    story id, columns 1-4 the four context sentences, columns 5-6 the two
    candidate endings, and the last column the 1-based index of the right
    ending. Evaluation hooks are not implemented yet (see the TODOs below).
    """

    # The dataset is gated behind a registration form, so it cannot be
    # fetched automatically; the user must place the CSVs under data/storycloze.
    NEEDS_MANUAL_DL = True

    def download(self):
        # TODO: replace with Eye link
        pass

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        # No training split is distributed for this task.
        pass

    def load_doc(self, filename):
        """Read ``filename`` as CSV and return its data rows as lists of strings.

        The official Story Cloze CSVs begin with a header row
        (InputStoryid, InputSentence1, ..., AnswerRightEnding). That row must
        not be yielded as a document: ``doc_to_target`` converts the last
        column with ``int()``, which would raise ValueError on the header.
        """
        with open(filename, newline='') as file:
            rows = list(csv.reader(file))
        # Drop the header if present: data rows end in "1" or "2", the header
        # ends in the column name "AnswerRightEnding" (not a digit).
        if rows and rows[0] and not rows[0][-1].strip().isdigit():
            rows = rows[1:]
        return rows

    def validation_docs(self):
        return self.load_doc("data/storycloze/cloze_test_val__winter2018-cloze_test_ALL_val - 1 - 1.csv")

    def test_docs(self):
        return self.load_doc("data/storycloze/cloze_test_test__winter2018-cloze_test_ALL_test - 1.csv")

    def fewshot_description(self):
        # TODO: figure out fewshot description
        return ""

    def doc_to_text(self, doc):
        # The four context sentences (columns 1-4), space-joined.
        return ' '.join([*doc[1:5]])

    def doc_to_target(self, doc):
        # The last column is "1" or "2"; ending k sits in column 4 + k
        # (columns 5 and 6 hold the two candidate endings). The leading space
        # separates the target from the context during scoring.
        return " " + doc[int(doc[-1]) + 4]

    def construct_requests(self, doc, ctx):
        """ Uses RequestFactory to construct Requests and returns an iterable of 
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural 
            language description, as well as the few shot examples, and the question
            part of the document for `doc`. 
        """
        # TODO: implement evaluation.
        raise NotImplementedError('Evaluation not implemented')

    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a 
        dict where keys are the names of submetrics and values are the values of 
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        # TODO: implement evaluation.
        raise NotImplementedError('Evaluation not implemented')

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are 
            functions that aggregate a list of metrics
        """
        # TODO: implement evaluation.
        raise NotImplementedError('Evaluation not implemented')

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are 
            whether a higher value of the submetric is better
        """
        # TODO: implement evaluation.
        raise NotImplementedError('Evaluation not implemented')