"sims/vscode:/vscode.git/clone" did not exist on "b4d6b3c2c9410399a3093e561044a7c77e41e003"
Unverified Commit e3077dcf authored by Hailey Schoelkopf, committed by GitHub

Merge branch 'big-refactor' into wmt

parents 21aa92d2 8eab2a58
@@ -48,7 +48,9 @@ class Sampler:
                 )
                 + self.target_delimiter
                 + (
-                    self.doc_to_target(doc)
+                    self.doc_to_target(doc)[0]
+                    if type(self.doc_to_target(doc)) is list
+                    else self.doc_to_target(doc)
                     if (
                         self.config.doc_to_choice is None
                         or type(self.doc_to_target(doc)) is str
@@ -771,7 +771,7 @@ class ConfigurableTask(Task):
             print(type(doc_to_text))
             raise TypeError

-    def doc_to_target(self, doc: dict) -> Union[int, str]:
+    def doc_to_target(self, doc: dict) -> Union[int, str, list]:
         if self.prompt is not None:
             doc_to_target = self.prompt
@@ -790,8 +790,12 @@ class ConfigurableTask(Task):
             target_string = utils.apply_template(doc_to_target, doc)
             if target_string.isdigit():
                 return ast.literal_eval(target_string)
+            elif (target_string[0] == "[") and (target_string[-1] == "]"):
+                return ast.literal_eval(target_string)
             else:
                 return target_string
+        elif type(doc_to_target) == list:
+            return doc_to_target
         elif callable(doc_to_target):
             return doc_to_target(doc)
         # Used when applying a Promptsource template
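Taken together with the `Sampler` change above (which uses only the first element when `doc_to_target` returns a list for a few-shot example), the added branches mean a templated target can now resolve to an `int`, a `list`, or a `str`. A minimal standalone sketch of the new parsing rule; the `parse_target_string` helper is hypothetical and merely mirrors the body of `doc_to_target`:

```python
import ast
from typing import Union


def parse_target_string(target_string: str) -> Union[int, str, list]:
    # digits -> int, "[...]" -> list (e.g. multiple references), anything else -> raw string
    if target_string.isdigit():
        return ast.literal_eval(target_string)
    elif (target_string[0] == "[") and (target_string[-1] == "]"):
        return ast.literal_eval(target_string)
    else:
        return target_string


print(parse_target_string("2"))               # 2
print(parse_target_string('["Ja", "Nein"]'))  # ['Ja', 'Nein']
print(parse_target_string("entailment"))      # 'entailment'
```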
@@ -1002,7 +1006,7 @@ class ConfigurableTask(Task):
                 choices = self.doc_to_choice(doc)
                 gold = choices[gold]

-            for key in self._metric_fn_list.keys():
+            for metric in self._metric_fn_list.keys():
                 result = results[0]
                 if self.multiple_target:
                     # in the case where we have multiple targets,
@@ -1011,18 +1015,18 @@ class ConfigurableTask(Task):
                     scores = []
                     for gold_option in gold:
                         try:
-                            result_score = self._metric_fn_list[key](
+                            result_score = self._metric_fn_list[metric](
                                 references=[gold_option],
                                 predictions=[result],
-                                **self._metric_fn_kwargs[key],
+                                **self._metric_fn_kwargs[metric],
                             )
                         except TypeError:  # TODO: this is hacky and I don't want to do it
-                            result_score = self._metric_fn_list[key](
+                            result_score = self._metric_fn_list[metric](
                                 [gold_option, result]
                             )
                         if isinstance(result_score, dict):
                             # TODO: this handles the case where HF evaluate returns a dict.
-                            result_score = result_score[key]
+                            result_score = result_score[metric]
                         scores.append(result_score)
                     if any(scores):
                         result_score = 1.0
@@ -1030,17 +1034,17 @@ class ConfigurableTask(Task):
                         result_score = 0.0
                 else:
                     try:
-                        result_score = self._metric_fn_list[key](
+                        result_score = self._metric_fn_list[metric](
                             references=[gold],
                             predictions=[result],
-                            **self._metric_fn_kwargs[key],
+                            **self._metric_fn_kwargs[metric],
                         )
-                    except TypeError:
+                    except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
-                        result_score = self._metric_fn_list[key]([gold, result])
+                        result_score = self._metric_fn_list[metric]([gold, result])
                     if isinstance(result_score, dict):
                         result_dict.update(result_score)
                     else:
-                        result_dict[key] = result_score
+                        result_dict[metric] = result_score
             else:
                 raise ValueError(
                     f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
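The renamed loop variable aside, the scoring logic above is easier to follow in isolation. Below is a simplified, hypothetical sketch (standalone names, not the harness's actual attributes) of the two behaviours it implements: try the keyword interface used by HF Evaluate metrics and fall back to a positional call on `TypeError`, and, for multiple gold targets, count the prediction as correct if it matches any of them:

```python
def score_one(metric_fn, gold, result, **metric_kwargs):
    # keyword interface first (HF Evaluate style), positional fallback otherwise
    try:
        return metric_fn(references=[gold], predictions=[result], **metric_kwargs)
    except TypeError:
        return metric_fn([gold, result])


def score_multiple_targets(metric_fn, golds, result):
    # mirrors the multiple_target branch: 1.0 if the prediction matches any gold option
    scores = [score_one(metric_fn, g, result) for g in golds]
    return 1.0 if any(scores) else 0.0


exact_match = lambda pair: float(pair[0] == pair[1])  # toy metric with the positional interface
print(score_multiple_targets(exact_match, ["Paris", "paris"], "Paris"))  # 1.0
```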
@@ -8,6 +8,7 @@ FILTER_REGISTRY = {
     "regex": extraction.RegexFilter,
     "majority_vote": selection.MajorityVoteFilter,
     "take_first_k": selection.TakeKFilter,
+    "remove_whitespace": extraction.WhitespaceFilter,
     # TODO: implement this filter. either it should take in an arbitrary "scoring"/reward function
     # that takes an input and returns a scalar and then should select the max reward,
     # or should implement different filters for different ways of handling a reward model's inference.
@@ -36,3 +36,26 @@ class RegexFilter(Filter):
         # print(filtered_resps)

         return filtered_resps
+
+
+class WhitespaceFilter(Filter):
+    """ """
+
+    def __init__(self):
+        pass
+
+    def apply(self, resps):
+        def filter_set(inst):
+            filtered_resp = []
+            for resp in inst:
+                if resp.startswith(" "):
+                    resp = resp[1:]
+                filtered_resp.append(resp)
+            return filtered_resp
+
+        filtered_resps = [filter_set(resp) for resp in resps]
+
+        return filtered_resps
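A quick illustration of the new filter, registered above under the `"remove_whitespace"` key, on made-up data. The import path is an assumption based on the `extraction.WhitespaceFilter` registry entry; each inner list holds the sampled generations for one document, and only a single leading space is stripped from each response:

```python
from lm_eval.filters.extraction import WhitespaceFilter  # module path assumed from the registry entry above

resps = [[" Paris", "Paris  "], [" 42"]]
print(WhitespaceFilter().apply(resps))
# [['Paris', 'Paris  '], ['42']]
```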
@@ -3,7 +3,7 @@ This list keeps track of which tasks' implementations have been ported to YAML /
 Boxes should be checked iff tasks are implemented in the refactor and tested for regression. Tasks should be struck through if checked *against original introducing paper* implementation or popularizing implementation. (WIP) Denotes that there exists a PR or person working on this task already.

-- [ ] Glue (Lintang)
+- [x] Glue
 - [x] SuperGlue
 - [ ] CoQA (Lintang)
 - [ ] DROP (Lintang)
@@ -18,7 +18,7 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for
 - [x] SciQ
 - [ ] QASPER
 - [x] QA4MRE
-- [ ] TriviaQA (Lintang)
+- [x] TriviaQA
 - [x] AI2 ARC
 - [x] LogiQA
 - [x] HellaSwag
@@ -45,16 +45,16 @@ Boxes should be checked iff tasks are implemented in the refactor and tested for
 - [ ] Translation (WMT) suite (Hailey)
 - [x] Unscramble
 - [x] ~~Pile (perplexity)~~
-- [ ] BLiMP (Lintang)
+- [x] BLiMP
 - [x] ToxiGen
-- [ ] StoryCloze (Lintang)
+- [x] StoryCloze
 - [ ] NaturalQs (Hailey)
 - [x] CrowS-Pairs
 - [x] XCopa
 - [ ] BIG-Bench (Hailey)
-- [ ] XStoryCloze (Lintang)
+- [x] XStoryCloze
 - [x] XWinograd
-- [ ] PAWS-X (Lintang)
+- [x] PAWS-X
 - [x] XNLI
 - [ ] MGSM (Lintang)
 - [ ] SCROLLS
# GLUE
### Paper
Title: `GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding`
Abstract: https://openreview.net/pdf?id=rJ4km2R5t7
The General Language Understanding Evaluation (GLUE) benchmark is a collection of
resources for training, evaluating, and analyzing natural language understanding
systems. GLUE consists of:
- A benchmark of nine sentence- or sentence-pair language understanding tasks built
on established existing datasets and selected to cover a diverse range of dataset
sizes, text genres, and degrees of difficulty, and
- A diagnostic dataset designed to evaluate and analyze model performance with
respect to a wide range of linguistic phenomena found in natural language.
Homepage: https://gluebenchmark.com/
### Citation
```
@inproceedings{wang-etal-2018-glue,
title = "{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding",
author = "Wang, Alex and
Singh, Amanpreet and
Michael, Julian and
Hill, Felix and
Levy, Omer and
Bowman, Samuel",
booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
month = nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-5446",
doi = "10.18653/v1/W18-5446",
pages = "353--355",
abstract = "Human ability to understand language is \textit{general, flexible, and robust}. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.",
}
```
### Subtasks
Tasks defined in this folder:
* `cola`: predict whether a sentence is linguistically acceptable (CoLA).
* `mnli`: natural language inference, evaluated on the matched validation split (MultiNLI).
* `mnli_mismatch`: the MNLI task evaluated on the mismatched validation split.
* `mrpc`: decide whether two sentences are paraphrases of each other (MRPC).
* `qnli`: decide whether a sentence answers a given question (QNLI).
* `qqp`: decide whether two questions have the same meaning (QQP).
* `rte`: binary textual entailment (RTE).
* `sst`: binary sentiment classification of a sentence (SST-2).
* `wnli`: Winograd-schema-style entailment (WNLI).
### Checklist
For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
group: glue
task: cola
dataset_path: glue
dataset_name: cola
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
- metric: mcc
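The `doc_to_text` values in these configs are Jinja templates rendered against each dataset row, and for `multiple_choice` tasks the integer target indexes into `doc_to_choice` (see `gold = choices[gold]` in the diff above). A sketch with a made-up CoLA row, using `jinja2` directly rather than the harness's own template helper:

```python
from jinja2 import Template

doc = {"sentence": "The boy quickly ran the race.", "label": 1}  # made-up example row

doc_to_text = "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:"
doc_to_choice = ["no", "yes"]

print(Template(doc_to_text).render(**doc))
# The boy quickly ran the race.
# Question: Does this sentence make sense?
# Answer:
print("gold continuation:", doc_to_choice[doc["label"]])  # yes
```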
group: glue
task: mnli
dataset_path: glue
dataset_name: mnli
output_type: multiple_choice
training_split: train
validation_split: validation_matched
doc_to_text: !function utils.doc_to_text
doc_to_target: label
doc_to_choice: ["True", "Neither", "False"]
metric_list:
- metric: acc
include: default.yaml
task: mnli_mismatch
validation_split: validation_mismatched
test_split: test_mismatched
def doc_to_text(doc):
return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format(
doc["premise"],
doc["hypothesis"].strip()
+ ("" if doc["hypothesis"].strip().endswith(".") else "."),
)
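For a hypothetical MNLI row, the helper above renders the prompt shown below; the period is only appended when the hypothesis does not already end with one, and the gold label indexes into `doc_to_choice: ["True", "Neither", "False"]` from the config:

```python
doc = {
    "premise": "A soccer game with multiple males playing.",
    "hypothesis": "Some men are playing a sport",  # no trailing period, so one is appended
    "label": 0,  # 0 -> "True" (entailment) under the doc_to_choice above
}

print(doc_to_text(doc))
# A soccer game with multiple males playing.
# Question: Some men are playing a sport. True, False or Neither?
# Answer:
```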
group: glue
task: mrpc
dataset_path: glue
dataset_name: mrpc
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
metric_list:
- metric: acc
- metric: f1
-group:
-  - glue-promptsource
+group: glue
 task: qnli
 dataset_path: glue
 dataset_name: qnli
 output_type: multiple_choice
 training_split: train
 validation_split: validation
-use_prompt: "promptsource:have all you need"
+doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
+doc_to_target: label
+doc_to_choice: ["yes", "no"]
 metric_list:
 - metric: acc
group: glue
task: qqp
dataset_path: glue
dataset_name: qqp
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: test
doc_to_text: "\nSentence 1: {{question1}}\nSentence 2: {{question2}}\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
metric_list:
- metric: acc
- metric: f1
group: glue
task: rte
dataset_path: glue
dataset_name: rte
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["True", "False"]
metric_list:
- metric: acc
group: glue
task: sst
dataset_path: glue
dataset_name: sst2
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:"
doc_to_target: label
doc_to_choice: ["negative", "positive"]
metric_list:
- metric: acc
group: glue
task: wnli
dataset_path: glue
dataset_name: wnli
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["False", "True"]
metric_list:
- metric: acc
# PAWS-X
### Paper
Title: `PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification`
Abstract: https://arxiv.org/abs/1908.11828
The dataset consists of 23,659 human translated PAWS evaluation pairs and
296,406 machine translated training pairs in 6 typologically distinct languages.
Examples are adapted from PAWS-Wiki
Prompt format (same as in mGPT):
"<s>" + sentence1 + ", right? " + mask + ", " + sentence2 + "</s>",
where `mask` is the string that matches the label: "Yes" or "No".
Example:
<s> The Tabaci River is a tributary of the River Leurda in Romania, right? No, The Leurda River is a tributary of the River Tabaci in Romania.</s>
Language-specific prompts are translated word-by-word with Google Translate and may differ from the ones used by mGPT and XGLM (they do not provide their prompts).
Homepage: https://github.com/google-research-datasets/paws/tree/master/pawsx
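Concretely, the two candidate continuations compared for each pair can be built as in the sketch below (plain Python mirroring the format described above, with the special `<s>`/`</s>` tokens omitted; the `pawsx_choices` helper is hypothetical, and the per-language YAML configs further down express the same concatenation as Jinja templates):

```python
def pawsx_choices(sentence1: str, sentence2: str, right="right?", yes="Yes", no="No"):
    # the model scores both continuations; the higher-likelihood one is its answer
    return [
        f"{sentence1}, {right} {yes}, {sentence2}",
        f"{sentence1}, {right} {no}, {sentence2}",
    ]


s1 = "The Tabaci River is a tributary of the River Leurda in Romania"
s2 = "The Leurda River is a tributary of the River Tabaci in Romania."
print(pawsx_choices(s1, s2)[1])
# The Tabaci River is a tributary of the River Leurda in Romania, right? No, The Leurda River is a tributary of the River Tabaci in Romania.
```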
### Citation
```
@inproceedings{yang-etal-2019-paws,
title = "{PAWS}-{X}: A Cross-lingual Adversarial Dataset for Paraphrase Identification",
author = "Yang, Yinfei and
Zhang, Yuan and
Tar, Chris and
Baldridge, Jason",
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-1382",
doi = "10.18653/v1/D19-1382",
pages = "3687--3692",
}
```
### Groups and Tasks
#### Groups
* `pawsx`
#### Tasks
* `paws_de`: German
* `paws_en`: English
* `paws_es`: Spanish
* `paws_fr`: French
* `paws_ja`: Japanese
* `paws_ko`: Korean
* `paws_zh`: Chinese
### Checklist
For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
# Generated by utils.py
dataset_name: de
doc_to_choice: '{{[sentence1+", richtig? Ja, "+sentence2, sentence1+", richtig? Nein,
"+sentence2]}}'
doc_to_text: ''
include: pawsx_template_yaml
task: paws_de
# Generated by utils.py
dataset_name: en
doc_to_choice: '{{[sentence1+", right? Yes, "+sentence2, sentence1+", right? No, "+sentence2]}}'
doc_to_text: ''
include: pawsx_template_yaml
task: paws_en
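The `# Generated by utils.py` header suggests these per-language files are produced by a small script. A hedged sketch of what such a generator might look like; the prompt table, file layout, and loop below are assumptions for illustration, not the actual `utils.py` in this folder:

```python
import yaml

# assumed translations of the ', right? Yes/No, ' connective; only two languages shown
PROMPTS = {
    "de": ("richtig?", "Ja", "Nein"),
    "en": ("right?", "Yes", "No"),
}

for lang, (right, yes, no) in PROMPTS.items():
    config = {
        "include": "pawsx_template_yaml",
        "task": f"paws_{lang}",
        "dataset_name": lang,
        "doc_to_text": "",
        "doc_to_choice": "{{["
        f'sentence1+", {right} {yes}, "+sentence2, '
        f'sentence1+", {right} {no}, "+sentence2'
        "]}}",
    }
    with open(f"paws_{lang}.yaml", "w", encoding="utf-8") as f:
        f.write("# Generated by utils.py\n")
        yaml.dump(config, f, allow_unicode=True)
```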