"sgl-router/tests/vscode:/vscode.git/clone" did not exist on "3f2d0cefcdbe43c424b5ad4665d0e7527bc7fb2d"
Unverified commit 93510e3a, authored by Leo Gao, committed by GitHub

Merge pull request #80 from nicholaskross/master

Started SAT eval
parents afc614fe 515e0470
# REMINDER: this code needs to be rewritten for the new framework. Remove this comment when the code is fully converted.
import json
import random
import os
from lm_eval.base import Dataset, rf, mean
from tqdm import auto as tqdm_lib
import numpy as np
from ..utils import sh


class SATAnalogies(Dataset):
    def __init__(self):
        super().__init__()

    def download(self):
        # We should be using a checksum here.
        # The canonical sha256 hash is below:
        # 9dece377d8d57253ef8c78370ff15de0bb1d9e90a82c815a67ba1e621e921bfc
        if not (os.path.exists('data/sat') and os.path.exists('data/sat/SAT-package-V3.txt')):
            raise NotImplementedError('SAT Analogies dataset is not provided. Follow instructions on https://aclweb.org/aclwiki/SAT_Analogy_Questions_(State_of_the_art) to locate.')

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return False

    def has_test_docs(self):
        return True

    def training_docs(self):
        return []

    def validation_docs(self):
        return []

    def test_docs(self):
        data = []

        with open("data/sat/SAT-package-V3.txt", "r") as f:
            lines = f.read().splitlines()
            record = []
            for line in lines:
                # A blank line terminates the current record.
                if len(line) == 0 and record:
                    data.append(record)
                    record = []
                # Lines starting with '#' are comments.
                elif len(line) > 0 and line[0] == '#':
                    continue
                else:
                    record.append(line)
            data.append(record)

        docs = []

        for record in data:
            # Each record ends with: source line, query pair, five choice pairs, answer-key letter.
            source = record[-8]
            query = record[-7]
            choices = record[-6:-1]
            answer_key = record[-1]

            doc = {
                'source': source,
                'query': query,
                'choices': choices,
                'answer_key': answer_key,
            }
            docs.append(doc)

        return docs

    def fewshot_description(self):
        # This format is ONLY for the purposes of deduplication. For the task evaluation, we'll need to find a new
        # strategy to meet the needs of this particular task.
        return "first thing is to second thing as\nthird thing is to fourth thing\nfifth thing is to sixth thing\nseventh thing is to eighth thing\nninth thing is to tenth thing\neleventh thing is to twelfth thing\nanswer which is either a b c d or e"

    def doc_to_text(self, doc, include_target=True):
        # SAT Analogies is currently only writing out full examples. Partial evaluation needs implementing.
        format_qn = lambda x: x[0] + ' is to ' + x[1]

        query = doc['query']
        choices = doc['choices']
        answer = doc['answer_key']

        query_words = query.split(' ')[:2]
        text = format_qn(query_words) + ' as' + '\n'

        for choice in choices:
            choice_words = choice.split(' ')[:2]
            text += format_qn(choice_words) + '\n'

        if include_target:
            text += answer
        return text

    def doc_to_target(self, doc):
        # assumes answer_key is the true-answer's letter
        return doc['answer_key']

    def construct_requests(self, ctx):
        # assumes the output is the predicted-answer's letter
        ll_a = rf.loglikelihood(ctx, ' a')
        ll_b = rf.loglikelihood(ctx, ' b')
        ll_c = rf.loglikelihood(ctx, ' c')
        ll_d = rf.loglikelihood(ctx, ' d')
        ll_e = rf.loglikelihood(ctx, ' e')
        return ll_a, ll_b, ll_c, ll_d, ll_e

    def process_results(self, doc, results):
        predicted_odds = np.array(list(results))
        # answer_key is a letter, so convert it to an index before comparing with argmax
        gold = ['a', 'b', 'c', 'd', 'e'].index(doc["answer_key"])
        acc = 1. if np.argmax(predicted_odds) == gold else 0.
        return [
            {
                "submetric": "acc",
                "value": acc,
                "higher_is_better": True,
                "aggregation": mean
            }
        ]

    def evaluate(self, docs, lm):
        # functionality already implemented above
        raise NotImplementedError()
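For reference, a sketch of the record layout test_docs() assumes and the prompt doc_to_text() renders. The record contents below are invented for illustration, since SAT-package-V3.txt itself is not redistributable:

# Invented record mimicking one parsed block from test_docs(); the real
# SAT-package-V3.txt contents cannot be shown, so these word pairs are made up.
record = [
    "190 FROM REAL SATs",   # source            -> record[-8]
    "lull soothe v:v",      # query pair        -> record[-7]
    "cajole flatter v:v",   # choice (a)        -> record[-6]
    "protect guard v:v",    # choice (b)
    "drink quench v:v",     # choice (c)
    "rest tire v:v",        # choice (d)
    "ask reply v:v",        # choice (e)        -> record[-2]
    "a",                    # answer-key letter -> record[-1]
]

doc = {
    'source': record[-8],
    'query': record[-7],
    'choices': record[-6:-1],
    'answer_key': record[-1],
}

# Mirrors doc_to_text() without the target appended:
format_qn = lambda x: x[0] + ' is to ' + x[1]
text = format_qn(doc['query'].split(' ')[:2]) + ' as\n'
for choice in doc['choices']:
    text += format_qn(choice.split(' ')[:2]) + '\n'
print(text)
# lull is to soothe as
# cajole is to flatter
# protect is to guard
# drink is to quench
# rest is to tire
# ask is to reply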
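Similarly, a sketch of how a scorer might consume the five per-letter loglikelihood requests from construct_requests(); the score values are invented stand-ins for what rf.loglikelihood would return from a real model:

import numpy as np

# Invented loglikelihoods for the continuations ' a' ... ' e'
# (a real run would receive these from the rf.loglikelihood requests).
results = (-1.2, -3.5, -2.8, -4.0, -3.9)
doc = {'answer_key': 'a'}

# Mirrors process_results(): argmax over the letters' scores,
# compared against the gold letter converted to an index.
predicted = int(np.argmax(np.array(results)))
gold = ['a', 'b', 'c', 'd', 'e'].index(doc['answer_key'])
acc = 1. if predicted == gold else 0.
print(acc)  # 1.0, since ' a' has the highest loglikelihood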