"example/vscode:/vscode.git/clone" did not exist on "0bd6d2cebb1e2aec7bab7b1e9b3979107b081fe7"
Unverified commit a77f4be9 authored by Stella Biderman, committed by GitHub

Merge pull request #536 from danny980521/update/klue_ynat

Update `KLUE-YNAT` prompt
parents a3b76ab1 d2dd333e
@@ -8,7 +8,7 @@ https://arxiv.org/abs/2105.09680
 With ethical considerations in mind, we deliberately design annotation guidelines
 to obtain unambiguous annotations for all datasets. Furthermore, we build an evaluation system
 and carefully choose evaluation metrics for every task, thus establishing fair comparison across Korean language models.
 Homepage: https://klue-benchmark.com/
 """
@@ -49,7 +49,7 @@ class STS(Task):
     VERSION = 0
     DATASET_PATH = "klue"
     DATASET_NAME = "sts"

     def has_training_docs(self):
         return True
@@ -69,8 +69,7 @@ class STS(Task):
     def doc_to_text(self, doc):
         return "질문: 문장 1과 문장 2는 서로 유사한 의미를 가지나요?\n문장 1: {}\n문장 2: {}\n정답:".format(
-            general_detokenize(doc["sentence1"]),
-            general_detokenize(doc["sentence2"])
+            general_detokenize(doc["sentence1"]), general_detokenize(doc["sentence2"])
         )

     def doc_to_target(self, doc):
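For reference, this is what the reformatted `doc_to_text` renders for a hypothetical KLUE-STS example; the prompt reads roughly "Question: Do sentence 1 and sentence 2 have a similar meaning?\nSentence 1: {}\nSentence 2: {}\nAnswer:". In the sketch below, `general_detokenize` is stubbed as an identity function purely for illustration; in the harness it normalizes spacing around punctuation.

```python
# Minimal sketch of the STS prompt; the sample sentences are made up.
def general_detokenize(s):
    # Stub for illustration; the real helper cleans detokenization artifacts.
    return s

doc = {"sentence1": "오늘 날씨가 좋다.", "sentence2": "오늘은 맑은 날이다."}

prompt = "질문: 문장 1과 문장 2는 서로 유사한 의미를 가지나요?\n문장 1: {}\n문장 2: {}\n정답:".format(
    general_detokenize(doc["sentence1"]), general_detokenize(doc["sentence2"])
)
print(prompt)
# 질문: 문장 1과 문장 2는 서로 유사한 의미를 가지나요?
# 문장 1: 오늘 날씨가 좋다.
# 문장 2: 오늘은 맑은 날이다.
# 정답:
```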
@@ -84,22 +83,13 @@ class STS(Task):
     def process_results(self, doc, results):
         pred = np.argmax(results)
         gold = doc["labels"]["binary-label"]
-        return {
-            "acc": pred == gold,
-            "f1": (gold, pred)
-        }
+        return {"acc": pred == gold, "f1": (gold, pred)}

     def higher_is_better(self):
-        return {
-            "acc": True,
-            "f1": True
-        }
+        return {"acc": True, "f1": True}

     def aggregation(self):
-        return {
-            "acc": mean,
-            "f1": f1_score
-        }
+        return {"acc": mean, "f1": f1_score}


 class YNAT(MultipleChoiceTask):
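Note the metric pattern this hunk compresses: "acc" is a per-document boolean that `mean` averages, while "f1" is deferred to corpus level, with each document contributing a `(gold, pred)` tuple that the `f1_score` aggregator consumes all at once. A hedged sketch of that aggregation, assuming the harness's `f1_score` is a thin wrapper over scikit-learn (its actual implementation lives outside this diff):

```python
# Corpus-level aggregation sketch; `f1_score` and `mean` here are assumed
# equivalents of the harness metrics, not copies of them.
import sklearn.metrics

def mean(arr):
    return sum(arr) / len(arr)

def f1_score(items):
    golds, preds = zip(*items)  # unzip the per-document (gold, pred) tuples
    return sklearn.metrics.f1_score(golds, preds)

per_doc = [
    {"acc": True, "f1": (1, 1)},
    {"acc": False, "f1": (0, 1)},
    {"acc": True, "f1": (0, 0)},
]
print(mean([d["acc"] for d in per_doc]))     # 0.666... (per-document average)
print(f1_score([d["f1"] for d in per_doc]))  # 0.666... (computed over the whole list)
```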
@@ -118,7 +108,7 @@ class YNAT(MultipleChoiceTask):
     def training_docs(self):
         if self._training_docs is None:
-            self._training_docs = list(map(self._process_doc,self.dataset["train"]))
+            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
         return self._training_docs

     def validation_docs(self):
@@ -128,32 +118,30 @@ class YNAT(MultipleChoiceTask):
         out_doc = {
             "title": doc["title"],
             "choices": ["과학", "경제", "사회", "생활", "세계", "스포츠", "정치"],
-            "gold": doc["label"]
+            "gold": doc["label"],
         }
         return out_doc

     def doc_to_text(self, doc):
-        return "{}".format(doc["title"])
+        return "질문: 다음의 제목을 가지는 뉴스는 어느 분야의 뉴스인가요?\n제목: {}\n분야:".format(doc["title"])

     def doc_to_target(self, doc):
-        return " ({})".format({0: "과학", 1: "경제", 2: "사회", 3: "생활", 4: "세계", 5: "스포츠", 6: "정치"}[doc["gold"]])
+        return " {}".format(
+            {0: "과학", 1: "경제", 2: "사회", 3: "생활", 4: "세계", 5: "스포츠", 6: "정치"}[
+                doc["gold"]
+            ]
+        )

     def process_results(self, doc, results):
         pred = np.argmax(results)
         gold = doc["gold"]
-        return {
-            "f1": (gold, pred)
-        }
+        return {"f1": (gold, pred)}

     def higher_is_better(self):
-        return {
-            "f1": True
-        }
+        return {"f1": True}

     def aggregation(self):
-        return {
-            "f1": macro_f1_score
-        }
+        return {"f1": macro_f1_score}


 class NLI(Task):
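The hunk above is the substance of PR #536. The YNAT prompt goes from the bare article title to an instruction-style template, roughly "Question: Which category does a news article with the following title belong to?\nTitle: {}\nCategory:", over the seven KLUE-YNAT categories (science, economy, society, lifestyle, world, sports, politics), and the few-shot target drops its parentheses. The harness's `MultipleChoiceTask` scores each candidate as a leading-space ` {choice}` continuation (treat that detail as an assumption, since the base class is outside this diff), so the new ` {label}` target makes few-shot examples consistent with what gets scored. A before/after rendering with a made-up title:

```python
# Before/after rendering of the YNAT update; the title is hypothetical.
labels = {0: "과학", 1: "경제", 2: "사회", 3: "생활", 4: "세계", 5: "스포츠", 6: "정치"}
doc = {"title": "삼성전자, 2분기 실적 발표", "gold": 1}

old_text = "{}".format(doc["title"])
# '삼성전자, 2분기 실적 발표'
new_text = "질문: 다음의 제목을 가지는 뉴스는 어느 분야의 뉴스인가요?\n제목: {}\n분야:".format(doc["title"])
# '질문: ...\n제목: 삼성전자, 2분기 실적 발표\n분야:'

old_target = " ({})".format(labels[doc["gold"]])  # ' (경제)'
new_target = " {}".format(labels[doc["gold"]])    # ' 경제' -- now matches the choice strings
```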
@@ -232,7 +220,18 @@ class MRC(Task):
         return self.dataset["validation"]

     def doc_to_text(self, doc):
-        return "제목: " + doc["title"] + "\n\n" + "본문: " + doc["context"] + "\n\n" + "질문: " + doc["question"] + "\n\n" + "답:"
+        return (
+            "제목: "
+            + doc["title"]
+            + "\n\n"
+            + "본문: "
+            + doc["context"]
+            + "\n\n"
+            + "질문: "
+            + doc["question"]
+            + "\n\n"
+            + "답:"
+        )

     def doc_to_target(self, doc):
         answer = doc["answers"]["text"][0]
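The MRC `doc_to_text` change is purely cosmetic: wrapping the `+` chain in parentheses lets the black-style reformat break it across lines, and the rendered prompt (title, passage, question, then "답:", i.e. "Answer:") is byte-for-byte identical. A quick check:

```python
# Sanity check that the reformatted concatenation is behavior-identical.
doc = {"title": "제목 예시", "context": "본문 예시", "question": "질문 예시"}  # placeholder fields

old = "제목: " + doc["title"] + "\n\n" + "본문: " + doc["context"] + "\n\n" + "질문: " + doc["question"] + "\n\n" + "답:"
new = (
    "제목: "
    + doc["title"]
    + "\n\n"
    + "본문: "
    + doc["context"]
    + "\n\n"
    + "질문: "
    + doc["question"]
    + "\n\n"
    + "답:"
)
assert old == new
```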
@@ -241,23 +240,23 @@ class MRC(Task):
         return " " + answer

     def construct_requests(self, doc, ctx):
-        """ Uses RequestFactory to construct Requests and returns an iterable of
+        """Uses RequestFactory to construct Requests and returns an iterable of
         Requests which will be sent to the LM.

         :param doc:
             The document as returned from training_docs, validation_docs, or test_docs.
         :param ctx: str
             The context string, generated by fewshot_context. This includes the natural
             language description, as well as the few shot examples, and the question
             part of the document for `doc`.
         """
-        continuation = rf.greedy_until(ctx, {"until": ["\n"]})
+        continuation = rf.greedy_until(ctx, ["\n"])
         is_unanswerable = rf.loglikelihood(ctx, " " + "대답 불가")
         return continuation, is_unanswerable

     def process_results(self, doc, results):
         """Take a single document and the LM results and evaluates, returning a
         dict where keys are the names of submetrics and values are the values of
         the metric for that one document

         :param doc:
@@ -268,17 +267,17 @@ class MRC(Task):
         continuation, (logprob_unanswerable, _) = results

         no_answer_probability = exp(logprob_unanswerable)

         predictions = {
-            'id': doc['guid'],
-            'prediction_text': continuation,
-            'no_answer_probability': no_answer_probability,
+            "id": doc["guid"],
+            "prediction_text": continuation,
+            "no_answer_probability": no_answer_probability,
         }

         references = {
-            'id': doc['guid'],
-            'answers': doc['answers'],
-            'unanswerable': doc['is_impossible'],
+            "id": doc["guid"],
+            "answers": doc["answers"],
+            "unanswerable": doc["is_impossible"],
        }

         return {
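Putting the two MRC hunks together: `construct_requests` issues a greedy generation that stops at a newline (the second argument to `rf.greedy_until` changes form between the two versions here, but both spell the same stop condition) plus a loglikelihood query for " 대답 불가" ("cannot answer"), and `process_results` turns that loglikelihood into `no_answer_probability` and packages SQuAD-v2-style `predictions`/`references` dicts for the scorer. A hedged sketch of the flow for one document, assuming results arrive in the order the requests were constructed (the actual scoring call is outside this hunk):

```python
# Flow sketch for one MRC document; the doc and LM outputs are hypothetical.
from math import exp

doc = {
    "guid": "klue-mrc-example-0001",  # made-up id
    "answers": {"text": ["1945년"], "answer_start": [42]},
    "is_impossible": False,
}

# Hypothetical LM results: the greedy continuation, then the
# (loglikelihood, is_greedy) pair for the " 대답 불가" continuation.
results = ["1945년", (-4.2, False)]
continuation, (logprob_unanswerable, _) = results

no_answer_probability = exp(logprob_unanswerable)  # ~0.015

predictions = {
    "id": doc["guid"],
    "prediction_text": continuation,
    "no_answer_probability": no_answer_probability,
}
references = {
    "id": doc["guid"],
    "answers": doc["answers"],
    "unanswerable": doc["is_impossible"],
}
# A SQuAD-v2-style scorer downstream can use no_answer_probability to trade
# off answering against abstaining on unanswerable questions.
```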
@@ -316,7 +315,7 @@ class MRC(Task):
     def aggregation(self):
         """
         :returns: {str: [float] -> float}
             A dictionary where keys are the names of submetrics and values are
             functions that aggregate a list of metrics
         """
         return {
@@ -349,7 +348,7 @@ class MRC(Task):
     def higher_is_better(self):
         """
         :returns: {str: bool}
             A dictionary where keys are the names of submetrics and values are
             whether a higher value of the submetric is better
         """
         return {
...