Unverified Commit 19b0f529 authored by Leo Gao's avatar Leo Gao Committed by GitHub
Browse files

Merge pull request #111 from jon-tow/wsc273-evaluation

Implement `WSC273` evaluation and data processing
parents e12d0078 bc5495d2
...@@ -57,7 +57,7 @@ TASK_REGISTRY = { ...@@ -57,7 +57,7 @@ TASK_REGISTRY = {
"race": race.RACE, "race": race.RACE,
# "naturalqs": naturalqs.NaturalQs, # not implemented yet # "naturalqs": naturalqs.NaturalQs, # not implemented yet
"webqs": webqs.WebQs, "webqs": webqs.WebQs,
# "wsc273": wsc273.WinogradSchemaChallenge273, # not implemented yet "wsc273": wsc273.WinogradSchemaChallenge273,
# "winogrande": winogrande.Winogrande, # not implemented yet # "winogrande": winogrande.Winogrande, # not implemented yet
"anli_r1": anli.ANLIRound1, "anli_r1": anli.ANLIRound1,
"anli_r2": anli.ANLIRound2, "anli_r2": anli.ANLIRound2,
......
import numpy as np
import random
from lm_eval.base import rf, mean
from . common import HFTask

"""
NOTE: This evaluation of Winograd Schema Challenge is based on `partial evaluation`
as described by Trinh & Le in Simple Method for Commonsense Reasoning (2018).
See: https://arxiv.org/abs/1806.02847
"""


class WinogradSchemaChallenge273(HFTask):
    """WSC273 via partial evaluation.

    Each candidate referent is substituted for the ambiguous pronoun, and the
    model's log-likelihood of the shared continuation (the text after the
    pronoun) under each substituted context decides which referent is preferred.
    """

    DATASET_PATH = "winograd_wsc"
    DATASET_NAME = "wsc273"

    # Capitalized pronouns/determiners that must be lower-cased when a candidate
    # option is substituted mid-sentence rather than at a sentence start.
    upper_pronouns = ["A", "An", "The", "She", "He",
                      "It", "They", "My", "His", "Her", "Their"]

    def __init__(self):
        super().__init__()
        # Re-process the HF-loaded split into a `partial evaluation` friendly form.
        self.data = self.__clean_data()

    def __clean_data(self):
        # The HF implementation of `wsc273` is not `partial evaluation` friendly.
        data = []
        for doc in self.data["test"]:
            # Collapse double spaces so `pronoun_loc` character offsets line up
            # with the cleaned text.
            doc["text"] = doc["text"].replace("  ", " ")
            doc["options"][0] = self.__normalize_option(doc["options"][0], doc)
            doc["options"][1] = self.__normalize_option(doc["options"][1], doc)
            data.append(doc)
        return {"test": data}

    def __normalize_option(self, option, doc):
        # Append `'s` to possessive determiner based options.
        if doc["pronoun"].lower() in ["my", "his", "her", "our", "their"]:
            option += "'s"
        # Appropriately lowercase the pronoun in the option.
        pronoun = option.split()[0]
        # NOTE(review): if `pronoun_loc` < 2 the negative index wraps to the end
        # of the string -- assumed not to occur in this dataset; verify.
        start_of_sentence = doc["text"][doc['pronoun_loc'] - 2] == '.'
        if not start_of_sentence and pronoun in self.upper_pronouns:
            return option.replace(pronoun, pronoun.lower())
        return option

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def fewshot_examples(self, k):
        # NOTE: `super().fewshot_examples` samples from training docs which are
        # not available for this test-set-only dataset.
        return random.sample(list(self.test_docs()), k)

    def fewshot_description(self):
        # TODO: redo description
        return "Winograd schema sentence with correct continuation. True. Winograd schema sentence with incorrect continuation. False."

    @classmethod
    def partial_context(cls, doc):
        # Substitute the pronoun in the original text with each candidate
        # choice and ignore everything after.
        context1 = doc["text"][:doc["pronoun_loc"]] + doc["options"][0]
        context2 = doc["text"][:doc["pronoun_loc"]] + doc["options"][1]
        return context1, context2

    @classmethod
    def partial_target(cls, doc):
        # The target is everything after the document specified pronoun.
        start_index = doc["pronoun_loc"] + len(doc["pronoun"])
        return doc["text"][start_index:].strip()

    def doc_to_text(self, doc):
        context1, context2 = self.partial_context(doc)
        return context1 + '\n' + context2 + '\n'

    def doc_to_target(self, doc):
        return self.partial_target(doc)

    def construct_requests(self, doc, ctx):
        """ Uses RequestFactory to construct Requests and returns an iterable of
        Requests which will be sent to the LM.

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param ctx: str
            The context string, generated by fewshot_context. This includes the natural
            language description, as well as the few shot examples, and the question
            part of the document for `doc`.
        """
        target = self.partial_target(doc)
        context1, context2 = self.partial_context(doc)
        ll_context1, _ = rf.loglikelihood(context1, " " + target)
        ll_context2, _ = rf.loglikelihood(context2, " " + target)
        return ll_context1, ll_context2

    def process_results(self, doc, results):
        """Take a single document and the LM results and evaluates, returning a
        dict where keys are the names of submetrics and values are the values of
        the metric for that one document

        :param doc:
            The document as returned from training_docs, validation_docs, or test_docs.
        :param results:
            The results of the requests created in construct_requests.
        """
        # The option whose substituted context gives the continuation the higher
        # log-likelihood wins; compare against the gold `label` option index.
        return {
            "acc": np.argmax(results) == doc["label"]
        }

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
            A dictionary where keys are the names of submetrics and values are
            functions that aggregate a list of metrics
        """
        return {
            "acc": mean
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
            A dictionary where keys are the names of submetrics and values are
            whether a higher value of the submetric is better
        """
        return {
            "acc": True
        }
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment