Unverified Commit 515e0470 authored by Nicholas Kross, committed by GitHub

Merge branch 'master' into master

parents 215de045 afc614fe
@@ -45,9 +45,11 @@ class ANLIBase(HFTask):
a = "True, False, or Neither?" + ((" " + ["True", "Neither", "False"][doc['label']]) if include_target else '')
return q + a
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: implement
raise NotImplementedError()
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
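    # A minimal sketch of what the new-framework port might look like, modeled
    # on the SATAnalogies implementation later in this diff; the base.py
    # interface (rf.loglikelihood, per-doc process_results, a mean aggregation)
    # and the np/mean imports are assumed here, not confirmed.
    def construct_requests(self, ctx):
        # Score each of the three candidate continuations from doc_to_text above.
        ll_true = rf.loglikelihood(ctx, ' True')
        ll_neither = rf.loglikelihood(ctx, ' Neither')
        ll_false = rf.loglikelihood(ctx, ' False')
        return ll_true, ll_neither, ll_false

    def process_results(self, doc, results):
        # doc['label'] indexes ["True", "Neither", "False"], matching doc_to_text.
        acc = 1. if int(np.argmax(list(results))) == doc['label'] else 0.
        return [{"submetric": "acc", "value": acc,
                 "higher_is_better": True, "aggregation": mean}]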
class ANLIRound1(ANLIBase):
SPLIT = 1
@@ -58,23 +58,12 @@ class DROP(Dataset):
text = ''.join([text, get_answer(pair['answer'])])
qa_texts.append(text)
return ''.join([doctext, '\n'.join(qa_texts)])
def evaluate(self, docs, lm, provide_description, num_fewshot):
"""Take iterable of docs and evaluates, returning a dict with the following format:
{
"major": float,
"minor": dict,
"higher_is_better": bool,
}
* `major` should be a single, representative number, for programmatic comparison
* `minor` should be a dictionary containing all relevant sub-metrics
* `higher_is_better` determines whether a higher metric is better
"""
pass
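        # For illustration only, a return value in the documented format might
        # look like the following (metric names and numbers are hypothetical;
        # DROP is conventionally scored with exact-match and F1):
        #     {
        #         "major": 0.42,
        #         "minor": {"em": 0.35, "f1": 0.42},
        #         "higher_is_better": True,
        #     }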
def fewshot_description(self):
return "Read the passage and answer the questions "
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
@@ -46,6 +46,12 @@ class CoLA(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -99,6 +105,11 @@ class MNLI(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -153,6 +164,11 @@ class MRPC(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -190,6 +206,11 @@ class RTE(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -227,6 +248,11 @@ class QNLI(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -265,6 +291,11 @@ class QQP(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -304,6 +335,11 @@ class STSB(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -359,6 +395,11 @@ class SST(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -397,6 +438,11 @@ class WNLI(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -51,6 +51,8 @@ class HellaSwag(HFTask):
text += doc['endings'][index]
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Write evaluation function
raise NotImplementedError()
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -46,5 +46,8 @@ class Lambada(Dataset):
#label = doc[]
return doc
def evaluate(self, docs, lm, provide_description, num_fewshot):
pass
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -50,6 +50,8 @@ class NaturalQs(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: implement
raise NotImplementedError()
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -53,6 +53,8 @@ class OpenBookQA(HFTask):
text += doc['choices']['text'][index] + '.'
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Write evaluation function
raise NotImplementedError()
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -54,6 +54,8 @@ class PiQA(Dataset):
#TODO: check if oa uses newline
return doc['goal'] + ' '
def evaluate(self, docs, lm):
pass
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -61,5 +61,8 @@ class QuAC(Dataset):
text += doc['answer']
return text
def evaluate(self, docs, lm):
pass
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -67,6 +67,8 @@ class RACE(HFTask):
return r
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: implement
raise NotImplementedError()
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
# REMINDER: this code needs to be rewritten for the new framework. Remove this comment when the code is fully converted.
import json
import random
import os
from lm_eval.base import Dataset, rf, mean
from tqdm import auto as tqdm_lib
from .common import simple_accuracy_metric
import numpy as np
from ..utils import sh
class SATAnalogies(Dataset):
    def __init__(self):
        super().__init__()

    def download(self):
        # We should be using a checksum here.
        # The canonical sha256 hash is below:
        # 9dece377d8d57253ef8c78370ff15de0bb1d9e90a82c815a67ba1e621e921bfc
        if not os.path.exists('data/sat/SAT-package-V3.txt'):
            raise NotImplementedError('SAT Analogies dataset is not provided. Follow the instructions at https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art) to locate it.')
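    # A sketch of the checksum verification suggested above, using hashlib
    # from the standard library (path and expected hash are taken from the
    # comments in this method):
    #     import hashlib
    #     with open('data/sat/SAT-package-V3.txt', 'rb') as f:
    #         digest = hashlib.sha256(f.read()).hexdigest()
    #     if digest != '9dece377d8d57253ef8c78370ff15de0bb1d9e90a82c815a67ba1e621e921bfc':
    #         raise RuntimeError('SAT-package-V3.txt failed checksum verification')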
    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def training_docs(self):
        return []

    def validation_docs(self):
        return []

    def test_docs(self):
        # Parse the raw SAT file: records are blank-line-separated blocks,
        # and lines starting with '#' are comments.
        data = []
        with open("data/sat/SAT-package-V3.txt", "r") as f:
            lines = f.read().splitlines()
            record = []
            for line in lines:
                if len(line) == 0 and record:
                    data.append(record)
                    record = []
                elif len(line) > 0 and line[0] == '#':
                    continue
                else:
                    record.append(line)
            # Guard against a trailing blank line producing an empty record.
            if record:
                data.append(record)
        docs = []
        for record in data:
            # The last 8 lines of each record are: source, query stem,
            # five answer choices, and the answer key letter.
            source = record[-8]
            query = record[-7]
            choices = record[-6:-1]
            answer_key = record[-1]
            doc = {
                'source': source,
                'query': query,
                'choices': choices,
                'answer_key': answer_key,
            }
            docs.append(doc)
        return docs
    def fewshot_description(self):
        # This format is ONLY for the purposes of deduplication. For the task
        # evaluation, we'll need to find a new strategy to meet the needs of
        # this particular task.
        return "first thing is to second thing as\nthird thing is to fourth thing\nfifth thing is to sixth thing\nseventh thing is to eighth thing\nninth thing is to tenth thing\neleventh thing is to twelfth thing\nanswer which is either a b c d or e"

    def doc_to_text(self, doc, include_target=True):
        # SAT Analogies currently only writes out full examples. Partial
        # evaluation still needs implementing.
        format_qn = lambda x: x[0] + ' is to ' + x[1]
        query = doc['query']
        choices = doc['choices']
        answer = doc['answer_key']
        query_words = query.split(' ')[:2]
        text = format_qn(query_words) + ' as' + '\n'
        for choice in choices:
            choice_words = choice.split(' ')[:2]
            text += format_qn(choice_words) + '\n'
        if include_target:
            text += answer
        return text

    def doc_to_target(self, doc):
        # assumes answer_key is the true answer's letter
        return doc['answer_key']

    def construct_requests(self, ctx):
        # one loglikelihood request per candidate answer letter
        ll_a = rf.loglikelihood(ctx, ' a')
        ll_b = rf.loglikelihood(ctx, ' b')
        ll_c = rf.loglikelihood(ctx, ' c')
        ll_d = rf.loglikelihood(ctx, ' d')
        ll_e = rf.loglikelihood(ctx, ' e')
        return ll_a, ll_b, ll_c, ll_d, ll_e

    def process_results(self, doc, results):
        predicted_odds = np.array(list(results))
        # answer_key is a letter, so map it to an index before comparing it
        # against the argmax over the five loglikelihoods.
        gold = ['a', 'b', 'c', 'd', 'e'].index(doc["answer_key"])
        acc = 1. if np.argmax(predicted_odds) == gold else 0.
        return [
            {
                "submetric": "acc",
                "value": acc,
                "higher_is_better": True,
                "aggregation": mean
            }
        ]

    def evaluate(self, docs, lm):
        # functionality already implemented above
        raise NotImplementedError()
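# For reference, a minimal sketch of how a new-framework driver might consume
# this task; fewshot_context and the LM request resolution live in base.py and
# are assumed (hypothetical names), not shown in this diff:
#     task = SATAnalogies()
#     values = []
#     for doc in task.test_docs():
#         ctx = task.doc_to_text(doc, include_target=False)
#         requests = task.construct_requests(ctx)
#         results = [lm.loglikelihood(req) for req in requests]  # hypothetical LM API
#         values.append(task.process_results(doc, results)[0]["value"])
#     print("acc:", mean(values))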
@@ -42,6 +42,8 @@ class SQuAD(HFTask):
text += answer
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Write evaluation function
raise NotImplementedError()
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -47,6 +47,9 @@ class StoryCloze(Dataset):
else:
return ' '.join([*doc[1:5]])
def evaluate(self, docs, lm):
pass
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
@@ -76,6 +76,11 @@ class CommitmentBank(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -120,6 +125,11 @@ class Copa(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -165,6 +175,11 @@ class MultiRC(HFTask):
return f"[{label_str}] {answer}"
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
preds = []
for doc in docs:
ctx = self.fewshot_context(
@@ -220,6 +235,11 @@ class WordsInContext(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -280,6 +300,11 @@ class SGWinogradSchemaChallenge(HFTask):
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Implement evaluation code using new framework
# ***IMPORTANT***: this evaluation function needs to be rewritten for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
golds = [doc["label"] for doc in docs]
preds = []
for doc in tqdm_lib.tqdm(docs):
@@ -314,7 +339,10 @@ class RTE(HFTask):
return ''.join([doc['premise'], '\nquestion: ',doc['hypothesis'], ' True or False?\nanswer: ', answer])
else:
return ''.join([doc['premise'], '\nquestion: ',doc['hypothesis'], ' True or False?\nanswer: '])
def evaluate(self, docs, lm, provide_description, num_fewshot):
#TODO:
pass
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
@@ -44,6 +44,9 @@ class TriviaQA(Dataset):
return ''.join(['Q: ', doc['Question'], '\n\n','A: ', doc['Answer']['Aliases'][0]])
else:
return ''.join(['Q: ', doc['Question'], '\n\n','A: '])
def evaluate(self, docs, lm):
pass
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -29,6 +29,8 @@ class WebQs(HFTask):
a = "A:" + ((" " + doc['answers'][0]) if include_target else '')
return q + a
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: implement
raise NotImplementedError()
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -15,8 +15,12 @@ class WikiText103(NLP_TASK):
def doc_to_text(self, doc, include_target=True):
return doc['text']
def evaluate(self, docs, lm, provide_description, num_fewshot):
pass
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
class WikiText2(NLP_TASK):
@@ -28,5 +32,9 @@ class WikiText2(NLP_TASK):
def doc_to_text(self, doc, include_target=True):
return doc['text']
def evaluate(self, docs, lm, provide_description, num_fewshot):
pass
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -47,6 +47,8 @@ class Winogrande(HFTask):
text = text.replace("_", answer)
return text
def evaluate(self, docs, lm, provide_description, num_fewshot):
# TODO: Write evaluation function
raise NotImplementedError()
\ No newline at end of file
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.
\ No newline at end of file
@@ -80,6 +80,8 @@ class WinogradSchemaChallenge273(Dataset):
text = doc['completions']['T'] + ' True. ' + doc['completions']['F'] + ' False.'
return text
def evaluate(self, docs, lm):
# TODO: Write evaluation function
raise NotImplementedError()
# TODO: Implement evaluation code
# ***IMPORTANT***: this evaluation function needs to be written for the new framework.
# For more info, check out the interface in base.py and the example BoolQ implementation in superglue.py.
# Remove this comment when the evaluation code is implemented.