"driver/device_direct_convolution_1.hpp" did not exist on "28354a0fa374f71ceeb72ddccf09796701981b3c"
Unverified commit 0f536808, authored by Stella Biderman, committed by GitHub

Merge pull request #99 from jon-tow/hellaswag-evaluation

Add `HellaSwag` evaluation implementation
parents 2d61b3ce 97b88570
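
In short, this merge promotes HellaSwag from a stubbed-out task to a working one. It uncomments the task's entry in the registry and replaces the `NotImplementedError` stubs in the task class: prompts are built from each document's `activity_label` and `ctx` fields, WikiHow bracket artifacts are stripped by a small regex helper, all four candidate endings are scored with loglikelihood requests, and per-document accuracy (argmax of the loglikelihoods versus the gold label) is aggregated with `mean`. The first hunk enables the task in `TASK_REGISTRY`: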
```diff
@@ -45,7 +45,7 @@ TASK_REGISTRY = {
     # "arc_easy": arc.ARCEasy, # not implemented yet
     # "arc_challenge": arc.ARCChallenge, # not implemented yet
     # "quac": quac.QuAC, # not implemented yet
-    # "hellaswag": hellaswag.HellaSwag, # not implemented yet
+    "hellaswag": hellaswag.HellaSwag, # not implemented yet
     # "openbookqa": openbookqa.OpenBookQA, # not implemented yet
     # "sat": sat.SATAnalogies, # not implemented yet
     # "squad": squad.SQuAD, # not implemented yet
```
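
For context, `TASK_REGISTRY` is a plain name-to-class map. A minimal sketch of how such a registry is typically consumed; the `get_task` helper and the stand-in class are illustrative assumptions, not the harness's actual API:

```python
class HellaSwag:  # stand-in for the task class defined below
    pass

# Name -> class map; this merge enables the "hellaswag" entry.
TASK_REGISTRY = {
    "hellaswag": HellaSwag,
}

def get_task(name):
    """Hypothetical lookup: resolve a task name and instantiate it."""
    try:
        return TASK_REGISTRY[name]()
    except KeyError:
        raise KeyError(f"unknown task: {name!r}")

task = get_task("hellaswag")  # -> a HellaSwag instance
```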
The second file in the diff is the HellaSwag task module itself. Its first hunk swaps the boilerplate metrics imports for `rf` and `mean` from `..base` and adds the bracket-stripping helper:

```diff
+import re
+
 import numpy as np
-from scipy.stats import pearsonr, spearmanr
-from sklearn.metrics import f1_score, matthews_corrcoef
-from tqdm import auto as tqdm_lib
-from . common import HFTask, simple_accuracy_metric, yesno
+
+from ..base import rf, mean
+from . common import HFTask


 class HellaSwag(HFTask):
     DATASET_PATH = "hellaswag"
     DATASET_NAME = None

+    @classmethod
+    def remove_brackets(cls, text):
+        """ Removes brackets from HellaSwag documents.
+        NOTE: The brackets are artifacts of the WikiHow dataset portion underlying
+        HellaSwag.
+        """
+        text = re.sub('\[.*?\]', '', text)
+        return text
+
     def has_training_docs(self):
         return True
```
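
The helper's regex is non-greedy, so each bracketed span is removed independently rather than everything between the first `[` and the last `]`. A quick self-contained check (the sample string is invented, but HellaSwag's WikiHow-derived rows contain similar `[header]`/`[step]` artifacts):

```python
import re

def remove_brackets(text):
    # Non-greedy '.*?' stops at the first ']', so spans are removed one by one.
    return re.sub(r'\[.*?\]', '', text)  # raw string avoids escape warnings

s = "[header] Remove ice from car. [step] Then, the man scrapes the window."
print(remove_brackets(s))
# -> " Remove ice from car.  Then, the man scrapes the window."
```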
```diff
@@ -30,10 +39,13 @@ class HellaSwag(HFTask):
         return self.data["test"]

     def fewshot_description(self):
-        return "Label for the relevant action: Sentences describing the context, with an incomplete sentence trailing\nanswer that plausibly completes the situation."
+        return "Label for the relevant action: Sentences describing the " \
+               "context, with an incomplete sentence trailing\nanswer that " \
+               "plausibly completes the situation."

     def doc_to_text(self, doc):
-        return doc['activity_label'] + ': ' + doc['ctx'] + '\n'
+        text = doc['activity_label'] + ': ' + doc['ctx'] + '\n'
+        return self.remove_brackets(text)

     def doc_to_target(self, doc):
         letter_answer = doc['label']
```
```diff
@@ -46,50 +58,59 @@ class HellaSwag(HFTask):
         elif letter_answer == '3':
             index = 3
         else:
-            raise ValueError("HellaSwag from HF datasets contained an invalid answer key")
-        return doc['endings'][index]
+            raise ValueError(
+                "HellaSwag from HF datasets contained an invalid answer key")
+        target = doc['endings'][index]
+        return self.remove_brackets(target)
```
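
Taken together, `doc_to_text` and `doc_to_target` turn one dataset row into a prompt/target pair. A sketch with an invented row shaped like the HuggingFace `hellaswag` schema (field values are made up); note that the `elif` chain above is just an explicit, validated form of `int(doc['label'])`:

```python
doc = {  # invented example row; field names per the HF "hellaswag" schema
    "activity_label": "Removing ice from car",
    "ctx": "Then, the man writes over the snow covering the window, and",
    "label": "2",
    "endings": ["ending 0", "ending 1", "[step] ending 2", "ending 3"],
}

prompt = doc["activity_label"] + ": " + doc["ctx"] + "\n"  # doc_to_text

index = int(doc["label"])        # the elif chain validates "0".."3"
target = doc["endings"][index]   # "[step] ending 2", brackets stripped next
```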
```diff
     def construct_requests(self, doc, ctx):
         """ Uses RequestFactory to construct Requests and returns an iterable of
         Requests which will be sent to the LM.

         :param doc:
             The document as returned from training_docs, validation_docs, or test_docs.
         :param ctx: str
             The context string, generated by fewshot_context. This includes the natural
             language description, as well as the few shot examples, and the question
             part of the document for `doc`.
         """
-        # TODO: implement evaluation.
-        raise NotImplementedError('Evaluation not implemented')
+        ll_answers = []
+        for i in range(4):
+            continuation = self.remove_brackets(doc['endings'][i])
+            ll_answers.append(rf.loglikelihood(ctx, continuation))
+        return ll_answers
```
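
Each document therefore yields four loglikelihood requests, one per candidate ending, and the results come back aligned by index with `doc['endings']`. A sketch of that contract, with a mock scorer standing in for `rf.loglikelihood` (which defers the actual scoring to the LM backend):

```python
def mock_loglikelihood(ctx, continuation):
    # Toy stand-in: pretend shorter continuations are more likely.
    return -float(len(continuation))

ctx = "Removing ice from car: Then, the man writes over the snow, and\n"
endings = ["walks away.", "scrapes the ice off.", "sings a song.", "waves."]

# One request per ending; result order matches `endings`.
ll_answers = [mock_loglikelihood(ctx, e) for e in endings]
```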
```diff
     def process_results(self, doc, results):
         """Take a single document and the LM results and evaluates, returning a
         dict where keys are the names of submetrics and values are the values of
         the metric for that one document

         :param doc:
             The document as returned from training_docs, validation_docs, or test_docs.
         :param results:
             The results of the requests created in construct_requests.
         """
-        # TODO: implement evaluation.
-        raise NotImplementedError('Evaluation not implemented')
+        gold = int(doc['label'])
+        pred = np.argmax(results)
+        acc = 1. if pred == gold else 0.
+        return {
+            "acc": acc
+        }
```
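
Scoring is then a straight argmax over the four loglikelihoods, compared against the gold index. With invented numbers:

```python
import numpy as np

results = [-41.2, -37.5, -35.1, -44.0]  # invented loglikelihoods, one per ending
gold = 2                                # int(doc['label'])

pred = np.argmax(results)               # index of the most likely ending -> 2
acc = 1. if pred == gold else 0.
print({"acc": acc})                     # -> {'acc': 1.0}
```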
```diff
     def aggregation(self):
         """
         :returns: {str: [float] -> float}
             A dictionary where keys are the names of submetrics and values are
             functions that aggregate a list of metrics
         """
-        # TODO: implement evaluation.
-        raise NotImplementedError('Evaluation not implemented')
+        return {
+            "acc": mean
+        }
```
```diff
     def higher_is_better(self):
         """
         :returns: {str: bool}
             A dictionary where keys are the names of submetrics and values are
             whether a higher value of the submetric is better
         """
-        # TODO: implement evaluation.
-        raise NotImplementedError('Evaluation not implemented')
\ No newline at end of file
+        return {
+            "acc": True
+        }
```
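
End to end, `process_results` emits one `{"acc": 0 or 1}` dict per document and `aggregation()` maps `"acc"` to `mean`, so the reported metric is plain accuracy over the evaluation set. A minimal sketch with invented per-document results (the `mean` stand-in mirrors what the harness's `mean` computes):

```python
per_doc = [{"acc": 1.}, {"acc": 0.}, {"acc": 1.}, {"acc": 1.}]  # invented

def mean(xs):  # stand-in for the `mean` imported from ..base
    return sum(xs) / len(xs)

print({"acc": mean([d["acc"] for d in per_doc])})  # -> {'acc': 0.75}
```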