# REMINDER: this code needs to be rewritten for the new framework. Remove this comment when the code is fully converted.
import itertools
import json
import random

import numpy as np

from lm_eval.base import Dataset, rf
from ..utils import sh


class CoQA(Dataset):
    def __init__(self):
        self.download()

    def download(self):
        sh("""
            mkdir -p data/coqa
            wget --no-clobber http://downloads.cs.stanford.edu/nlp/data/coqa/coqa-train-v1.0.json -O data/coqa/coqa-train-v1.0.json
            wget --no-clobber http://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json -O data/coqa/coqa-dev-v1.0.json
            """)

    @classmethod
    def get_answers(cls, doc, turn_id):
        # Collect the main answer for this turn plus any alternative answers.
        # NOTE: assumes the raw CoQA JSON layout, where each answer is a dict
        # with an "input_text" field and the dev set carries extra references
        # under "additional_answers", keyed by annotator id.
        answers = [doc["answers"][turn_id - 1]["input_text"]]
        for alternatives in doc.get("additional_answers", {}).values():
            answer = alternatives[turn_id - 1]["input_text"]
            if answer not in answers:
                answers.append(answer)
        return answers

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        with open("data/coqa/coqa-train-v1.0.json") as f:
            return json.load(f)["data"]

    def validation_docs(self):
        with open("data/coqa/coqa-dev-v1.0.json") as f:
            return json.load(f)["data"]

    def test_docs(self):
        pass

    def fewshot_description(self):
        return "Given a passage and a conversation so far, answer the next question in the conversation."

    def doc_to_text(self, doc):
        # Passage followed by the conversation so far. The final answer is
        # dropped ([:-1]) so the model has to produce it as the continuation
        # of the last "A:" prompt.
        qa_pairs = itertools.zip_longest(
            [q["input_text"] for q in doc["questions"]],
            [a["input_text"] for a in doc["answers"][:-1]],
        )
        conversation = "\n\n".join(
            f"Q: {q}\n\nA: {a}" if a is not None else f"Q: {q}\n\nA:"
            for q, a in qa_pairs
        )
        return "{}\n\n{}".format(doc["story"], conversation)

    def doc_to_target(self, doc):
        # TODO: all distinct answers taking into account whitespace?
        return self.get_answers(doc, len(doc["questions"]))

    def construct_requests(self, doc, ctx):
        """Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or
            test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes
            the natural language description, as well as the few shot examples,
            and the question part of the document for `doc`.
        """
        ll_alternative_answers = [
            rf.loglikelihood(ctx, " " + answer)
            for answer in self.get_answers(doc, len(doc["questions"]))
        ]
        return ll_alternative_answers

    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluate them,
        returning a dict where keys are the names of submetrics and values are
        the values of the metric for that one document.

        :param doc:
            The document as returned from training_docs, validation_docs, or
            test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        golds = self.get_answers(doc, len(doc["questions"]))
        # Placeholder: pick the reference answer with the highest loglikelihood.
        # Proper CoQA scoring (EM/F1 against all references) is still TODO.
        pred = golds[np.argmax(results)]
        return {
            "acc": pred in golds,
            # "f1": (golds, pred),  # TODO: Fix
        }

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        # TODO: implement evaluation.
        raise NotImplementedError('Evaluation not implemented')

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        # TODO: implement evaluation.
        raise NotImplementedError('Evaluation not implemented')
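

if __name__ == "__main__":
    # Hypothetical smoke test, not part of the original module: a tiny made-up
    # document in the raw CoQA JSON layout assumed by get_answers above. Run
    # it with `python -m` so the relative `..utils` import resolves.
    _toy_doc = {
        "story": "Anna has a cat named Tom.",
        "questions": [{"input_text": "What is the cat called?", "turn_id": 1}],
        "answers": [{"input_text": "Tom", "turn_id": 1}],
        "additional_answers": {
            "0": [{"input_text": "Tom the cat", "turn_id": 1}],
        },
    }
    # Expected output under the assumed layout: ['Tom', 'Tom the cat']
    print(CoQA.get_answers(_toy_doc, turn_id=1))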