Commit fb7d2336 authored by lintangsutawika

add glue, renamed sglue's rte to sglue_rte

parent 43bb4145
group: glue
task: cola
dataset_path: glue
dataset_name: cola
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
- metric: mcc
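Not part of the commit: a minimal sketch of how a config like the cola one above gets consumed, assuming doc_to_text is treated as a Jinja2 template (jinja2 import, the sample sentence, and the label value are illustrative assumptions).

from jinja2 import Template

# Hypothetical CoLA-style row; label 1 means the sentence is acceptable.
doc = {"sentence": "The book was written by John.", "label": 1}

doc_to_text = Template(
    "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:"
)
choices = ["no", "yes"]  # index 0 -> unacceptable, index 1 -> acceptable

print(doc_to_text.render(**doc))  # rendered prompt shown to the model
print("gold:", choices[doc["label"]])  # integer label mapped through doc_to_choice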
group: glue
task: mnli
dataset_path: glue
dataset_name: mnli
output_type: multiple_choice
training_split: train
validation_split: validation_matched
doc_to_text: !function utils.doc_to_text
doc_to_target: label
doc_to_choice: ["True", "Neither", "False"]
metric_list:
- metric: acc
!include default.yaml
task: mnli_mismatch
validation_split: validation_mismatched
test_split: test_mismatched

def doc_to_text(doc):
    # Build the MNLI prompt; append a period if the hypothesis lacks one.
    return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format(
        doc["premise"],
        doc["hypothesis"].strip()
        + ("" if doc["hypothesis"].strip().endswith(".") else "."),
    )
\ No newline at end of file
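Example use of the helper above on a made-up MNLI row (the premise/hypothesis strings are illustrative, not from the dataset); the mnli config references it as !function utils.doc_to_text.

from utils import doc_to_text  # the helper defined in the diff above

doc = {
    "premise": "The cat sat on the mat.",
    "hypothesis": "An animal is resting",  # no trailing period, so one is appended
}
print(doc_to_text(doc))
# The cat sat on the mat.
# Question: An animal is resting. True, False or Neither?
# Answer: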
-group:
-  - glue-promptsource
+group: glue
 task: qnli
 dataset_path: glue
 dataset_name: qnli
 output_type: multiple_choice
 training_split: train
 validation_split: validation
-use_prompt: "promptsource:have all you need"
+doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
+doc_to_target: label
+doc_to_choice: ["yes", "no"]
 metric_list:
 - metric: acc
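The qnli change above drops the promptsource reference in favor of an explicit template plus doc_to_target/doc_to_choice. A sketch (not part of the commit) of how the new two-field template renders, again assuming a Jinja2 template and a made-up row:

from jinja2 import Template

tmpl = Template(
    "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
)
print(tmpl.render(
    question="What is the capital of France?",
    sentence="Paris is the capital and largest city of France.",
))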
group: glue
task: rte
dataset_path: glue
dataset_name: rte
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["True", "False"]
metric_list:
- metric: acc
group: glue
task: sst
dataset_path: glue
dataset_name: sst2
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:"
doc_to_target: label
doc_to_choice: ["negative", "positive"]
metric_list:
- metric: acc
group: glue
task: wnli
dataset_path: glue
dataset_name: wnli
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["False", "True"]
metric_list:
- metric: acc
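Sketch only, not part of this commit: running a few of the glue tasks defined above through the harness's Python entry point. The simple_evaluate API and the model name are assumptions based on recent lm-evaluation-harness releases and may differ in this revision.

import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # hypothetical model choice
    tasks=["cola", "rte", "wnli"],
)
print(results["results"])  # per-task metrics, e.g. mcc for cola and acc for rte/wnli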
 group:
   - super-glue-lm-eval-v1
-task: rte
+task: sglue_rte
 dataset_path: super_glue
 dataset_name: rte
 output_type: multiple_choice
......