"vscode:/vscode.git/clone" did not exist on "3a3655d6435a766888e3fd1fbcd1dc3260fb3d37"
Unverified commit d924ca33, authored by ben and committed by GitHub.

Merge pull request #2 from EleutherAI/multigpu-feature-minor-edits

Multigpu feature minor edits
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "cause_effect"
use_prompt: "promptsource:cause_effect"
group:
  - super-glue-t5-prompt
task: t5-prompt
reference: "From Raffel et al. 2019"
dataset_path: super_glue
dataset_name: copa
training_split: train
validation_split: validation
doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} question: {{question}}"
doc_to_target: "{% set answer_choices = ['False', 'True'] %}{{answer_choices[label]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
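doc_to_text and doc_to_target above are Jinja templates rendered against each dataset row. The snippet below shows what they produce for a made-up COPA-style row; only the template strings come from the config, and rendering with plain jinja2 here is purely illustrative:

from jinja2 import Template

example = {
    "choice1": "The sun was rising.",
    "choice2": "The grass was cut.",
    "question": "cause",
    "label": 0,
}

doc_to_text = Template(
    "copa choice1: {{choice1}} choice2: {{choice2}} question: {{question}}"
)
doc_to_target = Template(
    "{% set answer_choices = ['False', 'True'] %}{{answer_choices[label]}}"
)

print(doc_to_text.render(**example))
# copa choice1: The sun was rising. choice2: The grass was cut. question: cause
print(doc_to_target.render(**example))
# False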
group:
  - super-glue-promptsource
task: "I was going to say…"
dataset_path: super_glue
dataset_name: multirc
training_split: train
validation_split: validation
use_prompt: "promptsource:I was going to say…"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
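All of these configs score with exact_match, optionally normalizing case and punctuation before comparing. Roughly, ignore_case and ignore_punctuation amount to the following (a sketch for intuition, not the harness's actual metric implementation):

import string

def exact_match(prediction, reference, ignore_case=True, ignore_punctuation=True):
    # Normalize both strings the same way before comparing.
    if ignore_case:
        prediction, reference = prediction.lower(), reference.lower()
    if ignore_punctuation:
        strip = str.maketrans("", "", string.punctuation)
        prediction, reference = prediction.translate(strip), reference.translate(strip)
    return float(prediction.strip() == reference.strip())

# exact_match("True.", "true") == 1.0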
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "Would it be good to answer…"
use_prompt: "promptsource:Would it be good to answer…"
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "confirm"
use_prompt: "promptsource:confirm"
group:
  - super-glue-promptsource
task: "Add sentence after (continuation choices)"
dataset_path: super_glue
dataset_name: record
training_split: train
validation_split: validation
use_prompt: "promptsource:Add sentence after (continuation choices)"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "Add sentence after after (continuation choices)"
use_prompt: "promptsource:Add sentence after after (continuation choices)"
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "Can you figure out…"
use_prompt: "promptsource:Can you figure out…"
group:
  - super-glue-t5-prompt
task: t5-prompt
reference: "From Raffel et al. 2019"
dataset_path: super_glue
dataset_name: record
training_split: train
validation_split: validation
doc_to_text: "record query: {{query}} entities: {{entities}} passage: {{passage}}"
doc_to_target: "{{answers}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
group:
  - super-glue-promptsource
task: "GPT-3 style"
dataset_path: super_glue
dataset_name: rte
training_split: train
validation_split: validation
use_prompt: "promptsource:GPT-3 style"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "MNLI crowdsource"
use_prompt: "promptsource:MNLI crowdsource"
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "based on the previous passage"
use_prompt: "promptsource:based on the previous passage"
group:
  - super-glue-promptsource
task: "GPT-3-prompt"
dataset_path: super_glue
dataset_name: wic
training_split: train
validation_split: validation
use_prompt: "promptsource:GPT-3-prompt"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "GPT-3-prompt-with-label"
use_prompt: "promptsource:GPT-3-prompt-with-label"
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "affirmation_true_or_false"
use_prompt: "promptsource:affirmation_true_or_false"
group:
  - super-glue-promptsource
task: "GPT-3 Style"
dataset_path: super_glue
dataset_name: wsc.fixed
training_split: train
validation_split: validation
use_prompt: "promptsource:GPT-3 Style"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "I think they mean"
use_prompt: "promptsource:I think they mean"
include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "Who or what is/are"
use_prompt: "promptsource:Who or what is/are"
import re


def doc_to_text(x):
    # Mark the two WSC referent spans in the passage, T5-style:
    # span1 is wrapped in "*" and span2 in "#".
    def _mark_span(text, span_str, span_idx, mark):
        pattern_tmpl = r"^((?:\S+\s){N})(W)"
        pattern = re.sub("N", str(span_idx), pattern_tmpl)
        pattern = re.sub("W", span_str, pattern)
        return re.sub(pattern, r"\1{0} \2 {0}".format(mark), text)

    text = x["text"]
    text = _mark_span(text, x["span1_text"], x["span1_index"], "*")
    # Compensate for the 2 marker "words" added in the previous step.
    span2_index = x["span2_index"] + 2 * (x["span1_index"] < x["span2_index"])
    text = _mark_span(text, x["span2_text"], span2_index, "#")
    return text
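For a quick sanity check, running doc_to_text on a made-up WSC-style row (the field values below are illustrative, not taken from the dataset) wraps the first span in * and the second in #:

example = {
    "text": "The trophy does not fit in the suitcase because it is too big .",
    "span1_text": "trophy",
    "span1_index": 1,
    "span2_text": "it",
    "span2_index": 9,
}
print(doc_to_text(example))
# The * trophy * does not fit in the suitcase because # it # is too big .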
group:
  - super-glue-t5-prompt
task: t5-prompt
reference: "From Raffel et al. 2019"
dataset_path: super_glue
dataset_name: wsc
training_split: train
validation_split: validation
doc_to_text: !function "preprocess_wsc.doc_to_text"
doc_to_target: "{% set answer_choices = ['False', 'True'] %}{{answer_choices[label]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
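With these files on the harness's task path, any of the task names above (or a group such as super-glue-promptsource) can be passed to the evaluator. A hypothetical call through the Python API, assuming the current lm-evaluation-harness simple_evaluate interface, which may differ on this branch:

import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # illustrative model choice
    tasks=["t5-prompt"],  # or one of the promptsource task names above
    num_fewshot=0,
    batch_size=8,
)
print(results["results"])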